android_kernel_xiaomi_sm8350/drivers/cam_sync/cam_sync.c
Jigarkumar Zala f1135e91b9 msm: camera: sync: Add callback registration logic for synx
The synx driver depends on the camera driver to register the sync
driver's callback functions. This change inverts that dependency: the
camera driver now registers the callback functions with the synx driver
at sync driver probe time.

Change-Id: I2dbee15ba4c7a9dde336a4bb1f2a4dddcd90348b
Signed-off-by: Jigarkumar Zala <jzala@codeaurora.org>
2019-07-09 10:34:33 -07:00

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h>

#include "cam_sync_util.h"
#include "cam_debug_util.h"
#include "cam_common_util.h"

#ifdef CONFIG_MSM_GLOBAL_SYNX
#include <synx_api.h>
#endif

struct sync_device *sync_dev;

/*
 * Flag to determine whether to enqueue cb of a
 * signaled fence onto the workq or invoke it
 * directly in the same context
 */
static bool trigger_cb_without_switch;
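
/*
 * Kernel-side usage sketch (illustrative only; the real call sites live
 * in the camera request manager and ISP drivers, and the fence name and
 * my_cb/my_data below are made-up placeholders):
 *
 *	int32_t fence;
 *	int rc;
 *
 *	rc = cam_sync_create(&fence, "isp_frame_fence");
 *	rc = cam_sync_register_callback(my_cb, my_data, fence);
 *	...
 *	rc = cam_sync_signal(fence, CAM_SYNC_STATE_SIGNALED_SUCCESS);
 *	rc = cam_sync_destroy(fence);
 */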
int cam_sync_create(int32_t *sync_obj, const char *name)
{
	int rc;
	long idx;
	bool bit;

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		CAM_DBG(CAM_SYNC, "Index location available at idx: %ld", idx);
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_row(sync_dev->sync_table, idx, name,
		CAM_SYNC_TYPE_INDV);
	if (rc) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}

	*sync_obj = idx;
	CAM_DBG(CAM_SYNC, "sync_obj: %i", *sync_obj);
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return rc;
}
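
/*
 * Register a callback to be invoked when @sync_obj is signaled. If the
 * object has already been signaled (and no merged children remain
 * pending), the callback fires right away: inline when the
 * trigger_cb_without_switch debugfs knob is set, otherwise from the
 * high-priority workqueue.
 */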
int cam_sync_register_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_callback_info *sync_cb;
	struct sync_table_row *row = NULL;
	int status = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0 || !cb_func)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	sync_cb = kzalloc(sizeof(*sync_cb), GFP_ATOMIC);
	if (!sync_cb) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -ENOMEM;
	}

	/* Trigger callback if sync object is already in SIGNALED state */
	if ((row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) &&
		(!row->remaining)) {
		if (trigger_cb_without_switch) {
			CAM_DBG(CAM_SYNC, "Invoke callback for sync object:%d",
				sync_obj);
			status = row->state;
			kfree(sync_cb);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			cb_func(sync_obj, status, userdata);
		} else {
			sync_cb->callback_func = cb_func;
			sync_cb->cb_data = userdata;
			sync_cb->sync_obj = sync_obj;
			INIT_WORK(&sync_cb->cb_dispatch_work,
				cam_sync_util_cb_dispatch);
			sync_cb->status = row->state;
			CAM_DBG(CAM_SYNC, "Enqueue callback for sync object:%d",
				sync_cb->sync_obj);
			queue_work(sync_dev->work_queue,
				&sync_cb->cb_dispatch_work);
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		}
		return 0;
	}

	sync_cb->callback_func = cb_func;
	sync_cb->cb_data = userdata;
	sync_cb->sync_obj = sync_obj;
	INIT_WORK(&sync_cb->cb_dispatch_work, cam_sync_util_cb_dispatch);
	list_add_tail(&sync_cb->list, &row->callback_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return 0;
}
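
/*
 * Remove every callback entry on @sync_obj that matches both @cb_func
 * and @userdata. Returns -ENOENT if no matching registration was found.
 */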
int cam_sync_deregister_callback(sync_callback cb_func,
	void *userdata, int32_t sync_obj)
{
	struct sync_table_row *row = NULL;
	struct sync_callback_info *sync_cb, *temp;
	bool found = false;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	CAM_DBG(CAM_SYNC, "deregistered callback for sync object:%d",
		sync_obj);
	list_for_each_entry_safe(sync_cb, temp, &row->callback_list, list) {
		if (sync_cb->callback_func == cb_func &&
			sync_cb->cb_data == userdata) {
			list_del_init(&sync_cb->list);
			kfree(sync_cb);
			found = true;
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	return found ? 0 : -ENOENT;
}
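
/*
 * Signal an individual (non-GROUP) sync object as SUCCESS or ERROR.
 * The row lock is dropped before parents are updated: the parents list
 * is spliced to a local list first, so each parent row can then be
 * locked on its own while its "remaining" count is decremented and,
 * once it hits zero, its callbacks are dispatched.
 */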
int cam_sync_signal(int32_t sync_obj, uint32_t status)
{
	struct sync_table_row *row = NULL;
	struct sync_table_row *parent_row = NULL;
	struct sync_parent_info *parent_info, *temp_parent_info;
	struct list_head parents_list;
	int rc = 0;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0) {
		CAM_ERR(CAM_SYNC, "Error: Out of range sync obj (0 <= %d < %d)",
			sync_obj, CAM_SYNC_MAX_OBJS);
		return -EINVAL;
	}

	row = sync_dev->sync_table + sync_obj;
	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	if (row->state == CAM_SYNC_STATE_INVALID) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->type == CAM_SYNC_TYPE_GROUP) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Signaling a GROUP sync object = %d",
			sync_obj);
		return -EINVAL;
	}

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: Sync object already signaled sync_obj = %d",
			sync_obj);
		return -EALREADY;
	}

	if (status != CAM_SYNC_STATE_SIGNALED_SUCCESS &&
		status != CAM_SYNC_STATE_SIGNALED_ERROR) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: signaling with undefined status = %d",
			status);
		return -EINVAL;
	}

	if (!atomic_dec_and_test(&row->ref_cnt)) {
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return 0;
	}

	row->state = status;
	cam_sync_util_dispatch_signaled_cb(sync_obj, status);

	/* copy parent list to local and release child lock */
	INIT_LIST_HEAD(&parents_list);
	list_splice_init(&row->parents_list, &parents_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);

	if (list_empty(&parents_list))
		return 0;

	/*
	 * Now iterate over all parents of this object and if they too need to
	 * be signaled dispatch cb's
	 */
	list_for_each_entry_safe(parent_info,
		temp_parent_info,
		&parents_list,
		list) {
		parent_row = sync_dev->sync_table + parent_info->sync_id;
		spin_lock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		parent_row->remaining--;

		rc = cam_sync_util_update_parent_state(
			parent_row,
			status);
		if (rc) {
			CAM_ERR(CAM_SYNC, "Invalid parent state %d",
				parent_row->state);
			spin_unlock_bh(
				&sync_dev->row_spinlocks[parent_info->sync_id]);
			kfree(parent_info);
			continue;
		}

		if (!parent_row->remaining)
			cam_sync_util_dispatch_signaled_cb(
				parent_info->sync_id, parent_row->state);

		spin_unlock_bh(&sync_dev->row_spinlocks[parent_info->sync_id]);
		list_del_init(&parent_info->list);
		kfree(parent_info);
	}

	return 0;
}
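
/*
 * Merge @num_objs individual objects into one GROUP object that signals
 * only when all of its children have signaled. Duplicate fences in the
 * input array are rejected up front.
 */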
int cam_sync_merge(int32_t *sync_obj, uint32_t num_objs, int32_t *merged_obj)
{
	int rc;
	long idx = 0;
	bool bit;

	if (!sync_obj || !merged_obj) {
		CAM_ERR(CAM_SYNC, "Invalid pointer(s)");
		return -EINVAL;
	}

	if (num_objs <= 1) {
		CAM_ERR(CAM_SYNC, "Single object merge is not allowed");
		return -EINVAL;
	}

	if (cam_common_util_remove_duplicate_arr(sync_obj, num_objs)
		!= num_objs) {
		CAM_ERR(CAM_SYNC, "The obj list has duplicate fence");
		return -EINVAL;
	}

	do {
		idx = find_first_zero_bit(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);
		if (idx >= CAM_SYNC_MAX_OBJS)
			return -ENOMEM;
		bit = test_and_set_bit(idx, sync_dev->bitmap);
	} while (bit);

	spin_lock_bh(&sync_dev->row_spinlocks[idx]);
	rc = cam_sync_init_group_object(sync_dev->sync_table,
		idx, sync_obj,
		num_objs);
	if (rc < 0) {
		CAM_ERR(CAM_SYNC, "Error: Unable to init row at idx = %ld",
			idx);
		clear_bit(idx, sync_dev->bitmap);
		spin_unlock_bh(&sync_dev->row_spinlocks[idx]);
		return -EINVAL;
	}
	CAM_DBG(CAM_SYNC, "Init row at idx:%ld to merge objects", idx);
	*merged_obj = idx;
	spin_unlock_bh(&sync_dev->row_spinlocks[idx]);

	return 0;
}
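
/*
 * Reference counting on a sync object defers signaling: cam_sync_signal()
 * only latches the final state once atomic_dec_and_test() brings the
 * count to zero, so references taken here keep the object active. Note
 * that cam_sync_put_obj_ref() drops the count without taking the row
 * lock.
 */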
int cam_sync_get_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	spin_lock(&sync_dev->row_spinlocks[sync_obj]);

	if (row->state != CAM_SYNC_STATE_ACTIVE) {
		spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	atomic_inc(&row->ref_cnt);
	spin_unlock(&sync_dev->row_spinlocks[sync_obj]);
	CAM_DBG(CAM_SYNC, "get ref for obj %d", sync_obj);

	return 0;
}

int cam_sync_put_obj_ref(int32_t sync_obj)
{
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;
	atomic_dec(&row->ref_cnt);
	CAM_DBG(CAM_SYNC, "put ref for obj %d", sync_obj);

	return 0;
}
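
/* Tear down a sync object and release its slot in the table. */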
int cam_sync_destroy(int32_t sync_obj)
{
	CAM_DBG(CAM_SYNC, "sync_obj: %i", sync_obj);
	return cam_sync_deinit_object(sync_dev->sync_table, sync_obj);
}
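
/*
 * Block for up to @timeout_ms waiting for the object to signal. Only a
 * SIGNALED_SUCCESS final state yields 0; an ERROR signal or a still
 * ACTIVE/INVALID row is reported as -EINVAL, and a timeout as
 * -ETIMEDOUT.
 */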
int cam_sync_wait(int32_t sync_obj, uint64_t timeout_ms)
{
	unsigned long timeleft;
	int rc = -EINVAL;
	struct sync_table_row *row = NULL;

	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		return -EINVAL;
	}

	timeleft = wait_for_completion_timeout(&row->signaled,
		msecs_to_jiffies(timeout_ms));

	if (!timeleft) {
		CAM_ERR(CAM_SYNC,
			"Error: timed out for sync obj = %d", sync_obj);
		rc = -ETIMEDOUT;
	} else {
		switch (row->state) {
		case CAM_SYNC_STATE_INVALID:
		case CAM_SYNC_STATE_ACTIVE:
		case CAM_SYNC_STATE_SIGNALED_ERROR:
			CAM_ERR(CAM_SYNC,
				"Error: Wait on invalid state = %d, obj = %d",
				row->state, sync_obj);
			rc = -EINVAL;
			break;
		case CAM_SYNC_STATE_SIGNALED_SUCCESS:
			rc = 0;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}

	return rc;
}
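
/*
 * Private-ioctl handlers. Each one validates the embedded payload size
 * and pointer, copies the argument in from user space, calls the
 * corresponding cam_sync_* API, and (for CREATE/MERGE) copies the
 * resulting handle back out. Note that the reference taken in
 * cam_sync_handle_signal() is consumed inside cam_sync_signal() only on
 * the path that reaches its atomic_dec_and_test().
 */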
static int cam_sync_handle_create(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	result = cam_sync_create(&sync_create.sync_obj,
		sync_create.name);

	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_create,
			k_ioctl->size))
			return -EFAULT;

	return result;
}

static int cam_sync_handle_signal(struct cam_private_ioctl_arg *k_ioctl)
{
	int rc = 0;
	struct cam_sync_signal sync_signal;

	if (k_ioctl->size != sizeof(struct cam_sync_signal))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_signal,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	/* need to get ref for UMD signaled fences */
	rc = cam_sync_get_obj_ref(sync_signal.sync_obj);
	if (rc) {
		CAM_DBG(CAM_SYNC,
			"Error: cannot signal an uninitialized sync obj = %d",
			sync_signal.sync_obj);
		return rc;
	}

	return cam_sync_signal(sync_signal.sync_obj,
		sync_signal.sync_state);
}

static int cam_sync_handle_merge(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_merge sync_merge;
	uint32_t *sync_objs;
	uint32_t num_objs;
	uint32_t size;
	int result;

	if (k_ioctl->size != sizeof(struct cam_sync_merge))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_merge,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	if (sync_merge.num_objs >= CAM_SYNC_MAX_OBJS)
		return -EINVAL;

	size = sizeof(uint32_t) * sync_merge.num_objs;
	sync_objs = kzalloc(size, GFP_ATOMIC);
	if (!sync_objs)
		return -ENOMEM;

	if (copy_from_user(sync_objs,
		u64_to_user_ptr(sync_merge.sync_objs),
		sizeof(uint32_t) * sync_merge.num_objs)) {
		kfree(sync_objs);
		return -EFAULT;
	}

	num_objs = sync_merge.num_objs;

	result = cam_sync_merge(sync_objs,
		num_objs,
		&sync_merge.merged);

	if (!result)
		if (copy_to_user(
			u64_to_user_ptr(k_ioctl->ioctl_ptr),
			&sync_merge,
			k_ioctl->size)) {
			kfree(sync_objs);
			return -EFAULT;
		}

	kfree(sync_objs);
	return result;
}

static int cam_sync_handle_wait(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_wait sync_wait;

	if (k_ioctl->size != sizeof(struct cam_sync_wait))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_wait,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	k_ioctl->result = cam_sync_wait(sync_wait.sync_obj,
		sync_wait.timeout_ms);

	return 0;
}

static int cam_sync_handle_destroy(struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_info sync_create;

	if (k_ioctl->size != sizeof(struct cam_sync_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&sync_create,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	return cam_sync_destroy(sync_create.sync_obj);
}

static int cam_sync_handle_register_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel;
	struct sync_user_payload *user_payload_iter;
	struct sync_user_payload *temp_upayload_kernel;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info))
		return -EINVAL;

	if (!k_ioctl->ioctl_ptr)
		return -EINVAL;

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	user_payload_kernel = kzalloc(sizeof(*user_payload_kernel), GFP_KERNEL);
	if (!user_payload_kernel)
		return -ENOMEM;

	memcpy(user_payload_kernel->payload_data,
		userpayload_info.payload,
		CAM_SYNC_PAYLOAD_WORDS * sizeof(__u64));

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return -EINVAL;
	}

	if (row->state == CAM_SYNC_STATE_SIGNALED_SUCCESS ||
		row->state == CAM_SYNC_STATE_SIGNALED_ERROR) {
		cam_sync_util_send_v4l2_event(CAM_SYNC_V4L_EVENT_ID_CB_TRIG,
			sync_obj,
			row->state,
			user_payload_kernel->payload_data,
			CAM_SYNC_USER_PAYLOAD_SIZE * sizeof(__u64));

		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		kfree(user_payload_kernel);
		return 0;
	}

	list_for_each_entry_safe(user_payload_iter,
		temp_upayload_kernel,
		&row->user_payload_list,
		list) {
		if (user_payload_iter->payload_data[0] ==
			user_payload_kernel->payload_data[0] &&
			user_payload_iter->payload_data[1] ==
			user_payload_kernel->payload_data[1]) {
			spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
			kfree(user_payload_kernel);
			return -EALREADY;
		}
	}

	list_add_tail(&user_payload_kernel->list, &row->user_payload_list);
	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}

static int cam_sync_handle_deregister_user_payload(
	struct cam_private_ioctl_arg *k_ioctl)
{
	struct cam_sync_userpayload_info userpayload_info;
	struct sync_user_payload *user_payload_kernel, *temp;
	uint32_t sync_obj;
	struct sync_table_row *row = NULL;

	if (k_ioctl->size != sizeof(struct cam_sync_userpayload_info)) {
		CAM_ERR(CAM_SYNC, "Incorrect ioctl size");
		return -EINVAL;
	}

	if (!k_ioctl->ioctl_ptr) {
		CAM_ERR(CAM_SYNC, "Invalid embedded ioctl ptr");
		return -EINVAL;
	}

	if (copy_from_user(&userpayload_info,
		u64_to_user_ptr(k_ioctl->ioctl_ptr),
		k_ioctl->size))
		return -EFAULT;

	sync_obj = userpayload_info.sync_obj;
	if (sync_obj >= CAM_SYNC_MAX_OBJS || sync_obj <= 0)
		return -EINVAL;

	spin_lock_bh(&sync_dev->row_spinlocks[sync_obj]);
	row = sync_dev->sync_table + sync_obj;

	if (row->state == CAM_SYNC_STATE_INVALID) {
		CAM_ERR(CAM_SYNC,
			"Error: accessing an uninitialized sync obj = %d",
			sync_obj);
		spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
		return -EINVAL;
	}

	list_for_each_entry_safe(user_payload_kernel, temp,
		&row->user_payload_list, list) {
		if (user_payload_kernel->payload_data[0] ==
			userpayload_info.payload[0] &&
			user_payload_kernel->payload_data[1] ==
			userpayload_info.payload[1]) {
			list_del_init(&user_payload_kernel->list);
			kfree(user_payload_kernel);
		}
	}

	spin_unlock_bh(&sync_dev->row_spinlocks[sync_obj]);
	return 0;
}
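
/*
 * v4l2 "default" ioctl entry point: dispatch CAM_PRIVATE_IOCTL_CMD
 * requests to the handlers above based on k_ioctl.id.
 */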
static long cam_sync_dev_ioctl(struct file *filep, void *fh,
	bool valid_prio, unsigned int cmd, void *arg)
{
	int32_t rc;
	struct sync_device *sync_dev = video_drvdata(filep);
	struct cam_private_ioctl_arg k_ioctl;

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "sync_dev NULL");
		return -EINVAL;
	}

	if (!arg)
		return -EINVAL;

	if (cmd != CAM_PRIVATE_IOCTL_CMD)
		return -ENOIOCTLCMD;

	k_ioctl = *(struct cam_private_ioctl_arg *)arg;

	switch (k_ioctl.id) {
	case CAM_SYNC_CREATE:
		rc = cam_sync_handle_create(&k_ioctl);
		break;
	case CAM_SYNC_DESTROY:
		rc = cam_sync_handle_destroy(&k_ioctl);
		break;
	case CAM_SYNC_REGISTER_PAYLOAD:
		rc = cam_sync_handle_register_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_DEREGISTER_PAYLOAD:
		rc = cam_sync_handle_deregister_user_payload(
			&k_ioctl);
		break;
	case CAM_SYNC_SIGNAL:
		rc = cam_sync_handle_signal(&k_ioctl);
		break;
	case CAM_SYNC_MERGE:
		rc = cam_sync_handle_merge(&k_ioctl);
		break;
	case CAM_SYNC_WAIT:
		rc = cam_sync_handle_wait(&k_ioctl);
		((struct cam_private_ioctl_arg *)arg)->result =
			k_ioctl.result;
		break;
	default:
		rc = -ENOIOCTLCMD;
	}

	return rc;
}
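
/* Poll support: report POLLPRI when a v4l2 event is queued for the fh. */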
static unsigned int cam_sync_poll(struct file *f,
	struct poll_table_struct *pll_table)
{
	int rc = 0;
	struct v4l2_fh *eventq = f->private_data;

	if (!eventq)
		return -EINVAL;

	poll_wait(f, &eventq->wait, pll_table);

	if (v4l2_event_pending(eventq))
		rc = POLLPRI;

	return rc;
}
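
/*
 * Device open/close. Only a single userspace client may hold the node
 * open; the last close signals all still-ACTIVE objects as ERROR,
 * flushes the callback workqueue, and destroys every remaining object.
 */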
static int cam_sync_open(struct file *filep)
{
	int rc;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		return -ENODEV;
	}

	mutex_lock(&sync_dev->table_lock);
	if (sync_dev->open_cnt >= 1) {
		mutex_unlock(&sync_dev->table_lock);
		return -EALREADY;
	}

	rc = v4l2_fh_open(filep);
	if (!rc) {
		sync_dev->open_cnt++;
		spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
		sync_dev->cam_sync_eventq = filep->private_data;
		spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	} else {
		CAM_ERR(CAM_SYNC, "v4l2_fh_open failed : %d", rc);
	}
	mutex_unlock(&sync_dev->table_lock);

	return rc;
}

static int cam_sync_close(struct file *filep)
{
	int rc = 0;
	int i;
	struct sync_device *sync_dev = video_drvdata(filep);

	if (!sync_dev) {
		CAM_ERR(CAM_SYNC, "Sync device NULL");
		rc = -ENODEV;
		return rc;
	}
	mutex_lock(&sync_dev->table_lock);
	sync_dev->open_cnt--;
	if (!sync_dev->open_cnt) {
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			/*
			 * Signal all ACTIVE objects as ERR, but we don't
			 * care about the return status here apart from logging
			 * it.
			 */
			if (row->state == CAM_SYNC_STATE_ACTIVE) {
				rc = cam_sync_signal(i,
					CAM_SYNC_STATE_SIGNALED_ERROR);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup signal fail idx:%d\n",
						i);
			}
		}

		/*
		 * Flush the work queue to wait for pending signal callbacks to
		 * finish
		 */
		flush_workqueue(sync_dev->work_queue);

		/*
		 * Now that all callbacks worker threads have finished,
		 * destroy the sync objects
		 */
		for (i = 1; i < CAM_SYNC_MAX_OBJS; i++) {
			struct sync_table_row *row =
				sync_dev->sync_table + i;

			if (row->state != CAM_SYNC_STATE_INVALID) {
				rc = cam_sync_destroy(i);
				if (rc < 0)
					CAM_ERR(CAM_SYNC,
						"Cleanup destroy fail:idx:%d\n",
						i);
			}
		}
	}
	mutex_unlock(&sync_dev->table_lock);
	spin_lock_bh(&sync_dev->cam_sync_eventq_lock);
	sync_dev->cam_sync_eventq = NULL;
	spin_unlock_bh(&sync_dev->cam_sync_eventq_lock);
	v4l2_fh_release(filep);

	return rc;
}
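
/* V4L2 event plumbing used to deliver payload-trigger events to UMD. */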
int cam_sync_subscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_subscribe(fh, sub, CAM_SYNC_MAX_V4L2_EVENTS, NULL);
}

int cam_sync_unsubscribe_event(struct v4l2_fh *fh,
	const struct v4l2_event_subscription *sub)
{
	return v4l2_event_unsubscribe(fh, sub);
}

static const struct v4l2_ioctl_ops g_cam_sync_ioctl_ops = {
	.vidioc_subscribe_event = cam_sync_subscribe_event,
	.vidioc_unsubscribe_event = cam_sync_unsubscribe_event,
	.vidioc_default = cam_sync_dev_ioctl,
};

static struct v4l2_file_operations cam_sync_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = cam_sync_open,
	.release = cam_sync_close,
	.poll = cam_sync_poll,
	.unlocked_ioctl = video_ioctl2,
#ifdef CONFIG_COMPAT
	.compat_ioctl32 = video_ioctl2,
#endif
};
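
/*
 * Media-controller registration is compiled in only with
 * CONFIG_MEDIA_CONTROLLER; otherwise these hooks are empty stubs.
 */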
#if defined(CONFIG_MEDIA_CONTROLLER)
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	int rc;

	sync_dev->v4l2_dev.mdev = kzalloc(sizeof(struct media_device),
		GFP_KERNEL);
	if (!sync_dev->v4l2_dev.mdev)
		return -ENOMEM;

	media_device_init(sync_dev->v4l2_dev.mdev);
	strlcpy(sync_dev->v4l2_dev.mdev->model, CAM_SYNC_DEVICE_NAME,
		sizeof(sync_dev->v4l2_dev.mdev->model));
	sync_dev->v4l2_dev.mdev->dev = &(pdev->dev);

	rc = media_device_register(sync_dev->v4l2_dev.mdev);
	if (rc < 0)
		goto register_fail;

	rc = media_entity_pads_init(&sync_dev->vdev->entity, 0, NULL);
	if (rc < 0)
		goto entity_fail;

	return 0;

entity_fail:
	media_device_unregister(sync_dev->v4l2_dev.mdev);
register_fail:
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	return rc;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
	media_entity_cleanup(&sync_dev->vdev->entity);
	media_device_unregister(sync_dev->v4l2_dev.mdev);
	media_device_cleanup(sync_dev->v4l2_dev.mdev);
	kfree(sync_dev->v4l2_dev.mdev);
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
	sync_dev->vdev->entity.function = CAM_SYNC_DEVICE_TYPE;
	sync_dev->vdev->entity.name =
		video_device_node_name(sync_dev->vdev);
}
#else
static int cam_sync_media_controller_init(struct sync_device *sync_dev,
	struct platform_device *pdev)
{
	return 0;
}

static void cam_sync_media_controller_cleanup(struct sync_device *sync_dev)
{
}

static void cam_sync_init_entity(struct sync_device *sync_dev)
{
}
#endif
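
/*
 * debugfs: expose trigger_cb_without_switch under camera_sync/ (typically
 * mounted at /sys/kernel/debug) so callback dispatch can be switched
 * between inline and workqueue context at runtime.
 */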
static int cam_sync_create_debugfs(void)
{
	sync_dev->dentry = debugfs_create_dir("camera_sync", NULL);

	if (!sync_dev->dentry) {
		CAM_ERR(CAM_SYNC, "Failed to create sync dir");
		return -ENOMEM;
	}

	if (!debugfs_create_bool("trigger_cb_without_switch",
		0644, sync_dev->dentry,
		&trigger_cb_without_switch)) {
		CAM_ERR(CAM_SYNC,
			"failed to create trigger_cb_without_switch entry");
		return -ENOMEM;
	}

	return 0;
}
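
/*
 * Bind this driver's callback-registration and signaling primitives into
 * the global synx framework. This is the inversion described in the
 * commit message: the camera driver registers these ops with synx at
 * sync driver probe time, instead of synx depending on the camera driver
 * to do so.
 */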
#ifdef CONFIG_MSM_GLOBAL_SYNX
static void cam_sync_register_synx_bind_ops(void)
{
	int rc = 0;
	struct synx_register_params params;

	params.name = CAM_SYNC_NAME;
	params.type = SYNX_TYPE_CSL;
	params.ops.register_callback = cam_sync_register_callback;
	params.ops.deregister_callback = cam_sync_deregister_callback;
	params.ops.enable_signaling = cam_sync_get_obj_ref;
	params.ops.signal = cam_sync_signal;

	rc = synx_register_ops(&params);
	if (rc)
		CAM_ERR(CAM_SYNC, "synx registration fail with %d", rc);
}
#endif
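
/*
 * Probe: allocate the device state, register the v4l2/video device that
 * backs the UMD-facing ioctls, reserve handle 0 as the invalid handle,
 * and create the high-priority callback workqueue. Synx bind ops are
 * registered last when CONFIG_MSM_GLOBAL_SYNX is enabled.
 */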
static int cam_sync_probe(struct platform_device *pdev)
{
	int rc;
	int idx;

	sync_dev = kzalloc(sizeof(*sync_dev), GFP_KERNEL);
	if (!sync_dev)
		return -ENOMEM;

	mutex_init(&sync_dev->table_lock);
	spin_lock_init(&sync_dev->cam_sync_eventq_lock);

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	sync_dev->vdev = video_device_alloc();
	if (!sync_dev->vdev) {
		rc = -ENOMEM;
		goto vdev_fail;
	}

	rc = cam_sync_media_controller_init(sync_dev, pdev);
	if (rc < 0)
		goto mcinit_fail;

	sync_dev->vdev->v4l2_dev = &sync_dev->v4l2_dev;

	rc = v4l2_device_register(&(pdev->dev), sync_dev->vdev->v4l2_dev);
	if (rc < 0)
		goto register_fail;

	strlcpy(sync_dev->vdev->name, CAM_SYNC_NAME,
		sizeof(sync_dev->vdev->name));
	sync_dev->vdev->release = video_device_release;
	sync_dev->vdev->fops = &cam_sync_v4l2_fops;
	sync_dev->vdev->ioctl_ops = &g_cam_sync_ioctl_ops;
	sync_dev->vdev->minor = -1;
	sync_dev->vdev->vfl_type = VFL_TYPE_GRABBER;

	rc = video_register_device(sync_dev->vdev,
		VFL_TYPE_GRABBER, -1);
	if (rc < 0)
		goto v4l2_fail;

	cam_sync_init_entity(sync_dev);
	video_set_drvdata(sync_dev->vdev, sync_dev);
	memset(&sync_dev->sync_table, 0, sizeof(sync_dev->sync_table));
	memset(&sync_dev->bitmap, 0, sizeof(sync_dev->bitmap));
	bitmap_zero(sync_dev->bitmap, CAM_SYNC_MAX_OBJS);

	/*
	 * We treat zero as invalid handle, so we will keep the 0th bit set
	 * always
	 */
	set_bit(0, sync_dev->bitmap);

	sync_dev->work_queue = alloc_workqueue(CAM_SYNC_WORKQUEUE_NAME,
		WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!sync_dev->work_queue) {
		CAM_ERR(CAM_SYNC,
			"Error: high priority work queue creation failed");
		rc = -ENOMEM;
		goto v4l2_fail;
	}

	trigger_cb_without_switch = false;
	cam_sync_create_debugfs();
#ifdef CONFIG_MSM_GLOBAL_SYNX
	cam_sync_register_synx_bind_ops();
#endif

	return rc;

v4l2_fail:
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
register_fail:
	cam_sync_media_controller_cleanup(sync_dev);
mcinit_fail:
	video_device_release(sync_dev->vdev);
vdev_fail:
	mutex_destroy(&sync_dev->table_lock);
	kfree(sync_dev);
	return rc;
}
static int cam_sync_remove(struct platform_device *pdev)
{
	v4l2_device_unregister(sync_dev->vdev->v4l2_dev);
	cam_sync_media_controller_cleanup(sync_dev);
	video_device_release(sync_dev->vdev);
	debugfs_remove_recursive(sync_dev->dentry);
	sync_dev->dentry = NULL;
	kfree(sync_dev);
	sync_dev = NULL;

	return 0;
}
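
/*
 * The driver registers its own platform_device with a matching name, so
 * the bus binds them and probe runs as soon as the module loads, without
 * requiring a device-tree node.
 */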
static struct platform_device cam_sync_device = {
	.name = "cam_sync",
	.id = -1,
};

static struct platform_driver cam_sync_driver = {
	.probe = cam_sync_probe,
	.remove = cam_sync_remove,
	.driver = {
		.name = "cam_sync",
		.owner = THIS_MODULE,
		.suppress_bind_attrs = true,
	},
};

static int __init cam_sync_init(void)
{
	int rc;

	rc = platform_device_register(&cam_sync_device);
	if (rc)
		return -ENODEV;

	return platform_driver_register(&cam_sync_driver);
}

static void __exit cam_sync_exit(void)
{
	int idx;

	for (idx = 0; idx < CAM_SYNC_MAX_OBJS; idx++)
		spin_lock_init(&sync_dev->row_spinlocks[idx]);

	platform_driver_unregister(&cam_sync_driver);
	platform_device_unregister(&cam_sync_device);
	kfree(sync_dev);
}

module_init(cam_sync_init);
module_exit(cam_sync_exit);
MODULE_DESCRIPTION("Camera sync driver");
MODULE_LICENSE("GPL v2");