adc_enqueue_request(struct adc_host *adc, struct adc_request *req)
{
int head, tail;
-
- mutex_lock(&adc->queue_mutex);
-
+ unsigned long flags;
+
+ spin_lock_irqsave(&adc->lock, flags);
head = adc->queue_head;
tail = adc->queue_tail;
if (adc->queue[tail]) {
- mutex_unlock(&adc->queue_mutex);
+ spin_unlock_irqrestore(&adc->lock, flags);
dev_err(adc->dev, "ADC queue is full, dropping request\n");
return -EBUSY;
}
trigger_next_adc_job_if_any(adc);
adc->queue_tail = (tail + 1) & (MAX_ADC_FIFO_DEPTH - 1);
- mutex_unlock(&adc->queue_mutex);
+ spin_unlock_irqrestore(&adc->lock, flags);
return 0;
}
int adc_async_read(struct adc_client *client)
{
+ int ret = 0;
struct adc_request *req = NULL;
if(client == NULL) {
req->client = client;
req->status = ASYNC_READ;
- return adc_enqueue_request(client->adc, req);
+ ret = adc_enqueue_request(client->adc, req);
+ if(ret < 0)
+ kfree(req);
+
+ return ret;
}
EXPORT_SYMBOL(adc_async_read);
{
struct adc_request *req;
int head, res;
-
+ spin_lock(&adc->lock);
head = adc->queue_head;
req = adc->queue[head];
if (WARN_ON(!req)) {
+ spin_unlock(&adc->lock);
dev_err(adc->dev, "adc irq: ADC queue empty!\n");
return;
}
kfree(req);
req = NULL;
}
+ spin_unlock(&adc->lock);
}
EXPORT_SYMBOL(adc_core_irq_handle);
adc = adc_alloc_host(sizeof(struct rk29_adc_device), &pdev->dev);\r
if (!adc)\r
return -ENOMEM;\r
- mutex_init(&adc->queue_mutex);\r
+ spin_lock_init(&adc->lock);\r
adc->dev = &pdev->dev;\r
adc->is_suspended = 0;\r
adc->ops = &rk29_adc_ops;\r
struct adc_request *queue[MAX_ADC_FIFO_DEPTH];\r
int queue_head;\r
int queue_tail;\r
- struct mutex queue_mutex;\r
+ spinlock_t lock;\r
struct adc_client *cur;\r
const struct adc_ops *ops;\r
unsigned long private[0];\r