struct list_head issued_list;
struct list_head done_list;
struct pl08x_txd *at;
- spinlock_t lock;
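+ /* The channel state above is now protected by the virt-dma vc.lock */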
struct pl08x_driver_data *host;
enum pl08x_dma_chan_state state;
bool slave;
unsigned long flags;
size_t bytes = 0;
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
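+ /* The old driver-private plchan->lock is gone; the virt-dma core's
+  * vc.lock now guards phychan, at and the descriptor lists. */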
ch = plchan->phychan;
txd = plchan->at;
}
}
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return bytes;
}
* Eww. We know this isn't going to deadlock
* but lockdep probably doesn't.
*/
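+ /* This channel's vc.lock is already held (the IRQ path below takes
+  * it around pl08x_phy_free()), so this nests two locks of the same
+  * class for the reassignment. */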
- spin_lock(&next->lock);
+ spin_lock(&next->vc.lock);
/* Re-check the state now that we have the lock */
success = next->state == PL08X_CHAN_WAITING;
if (success)
pl08x_phy_reassign_start(plchan->phychan, next);
- spin_unlock(&next->lock);
+ spin_unlock(&next->vc.lock);
/* If the state changed, try to find another channel */
if (!success)
goto retry;
unsigned long flags;
dma_cookie_t cookie;
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
cookie = dma_cookie_assign(tx);
/* Put this onto the pending list */
list_add_tail(&txd->node, &plchan->pend_list);
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return cookie;
}
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
unsigned long flags;
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
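+ /* Move all submitted descriptors onto the issued list, then grab a
+  * physical channel if this virtual channel doesn't own one yet. */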
list_splice_tail_init(&plchan->pend_list, &plchan->issued_list);
if (!list_empty(&plchan->issued_list)) {
if (!plchan->phychan && plchan->state != PL08X_CHAN_WAITING)
pl08x_phy_alloc_and_start(plchan);
}
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
}
static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
struct pl08x_txd *txd)
if (!num_llis) {
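+ /* No LLIs could be built for this transfer: give the descriptor
+  * back and fail the prep. */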
unsigned long flags;
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return -EINVAL;
}
* Anything succeeds on channels with no physical allocation and
* no queued transfers.
*/
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
if (!plchan->phychan && !plchan->at) {
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return 0;
}
break;
}
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
return ret;
}
unsigned long flags;
LIST_HEAD(head);
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
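+ /* Detach the completed descriptors onto a private list so their
+  * callbacks can run without vc.lock held. */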
list_splice_tail_init(&plchan->done_list, &head);
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
while (!list_empty(&head)) {
struct pl08x_txd *txd = list_first_entry(&head,
struct pl08x_txd, node);
pl08x_unmap_buffers(txd);
/* Free the descriptor */
- spin_lock_irqsave(&plchan->lock, flags);
+ spin_lock_irqsave(&plchan->vc.lock, flags);
pl08x_free_txd(pl08x, txd);
- spin_unlock_irqrestore(&plchan->lock, flags);
+ spin_unlock_irqrestore(&plchan->vc.lock, flags);
/* Callback to signal completion */
if (callback)
continue;
}
- spin_lock(&plchan->lock);
+ spin_lock(&plchan->vc.lock);
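+ /* Interrupt context: the plain spin_lock variant is sufficient */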
tx = plchan->at;
if (tx) {
plchan->at = NULL;
if (!list_empty(&plchan->issued_list))
pl08x_start_next_txd(plchan);
else
pl08x_phy_free(plchan);
}
- spin_unlock(&plchan->lock);
+ spin_unlock(&plchan->vc.lock);
/* Schedule tasklet on this channel */
tasklet_schedule(&plchan->tasklet);
"initialize virtual channel \"%s\"\n",
chan->name);
- chan->vc.chan.device = dmadev;
- dma_cookie_init(&chan->vc.chan);
-
- spin_lock_init(&chan->lock);
INIT_LIST_HEAD(&chan->pend_list);
INIT_LIST_HEAD(&chan->issued_list);
INIT_LIST_HEAD(&chan->done_list);
tasklet_init(&chan->tasklet, pl08x_tasklet,
(unsigned long) chan);
- list_add_tail(&chan->vc.chan.device_node, &dmadev->channels);
+ vchan_init(&chan->vc, dmadev);
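+ /* vchan_init() now performs the cookie, lock, device and
+  * channel-list initialisation that was open-coded above. */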
}
dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
i, slave ? "slave" : "memcpy");