/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"
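
/*
 * Request the "tx" and "rx" DMA slave channels and allocate one buffer
 * that holds both the result area read back from the crypto engine and
 * the scratch area whose contents are ignored.
 */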
int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
        int ret;

        dma->txchan = dma_request_slave_channel_reason(dev, "tx");
        if (IS_ERR(dma->txchan))
                return PTR_ERR(dma->txchan);

        dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
        if (IS_ERR(dma->rxchan)) {
                ret = PTR_ERR(dma->rxchan);
                goto error_rx;
        }

        dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
                                  GFP_KERNEL);
        if (!dma->result_buf) {
                ret = -ENOMEM;
                goto error_nomem;
        }

        dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

        return 0;
error_nomem:
        dma_release_channel(dma->rxchan);
error_rx:
        dma_release_channel(dma->txchan);
        return ret;
}
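
/* Release both channels and the result/ignore buffer allocated above. */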
void qce_dma_release(struct qce_dma_data *dma)
{
        dma_release_channel(dma->txchan);
        dma_release_channel(dma->rxchan);
        kfree(dma->result_buf);
}
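
/*
 * Map a scatterlist for DMA.  A chained list cannot be handed to
 * dma_map_sg() in one go, so its entries are mapped one at a time.
 */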
int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
              enum dma_data_direction dir, bool chained)
{
        int err;

        if (chained) {
                while (sg) {
                        err = dma_map_sg(dev, sg, 1, dir);
                        if (!err)
                                return -EFAULT;
                        sg = scatterwalk_sg_next(sg);
                }
        } else {
                err = dma_map_sg(dev, sg, nents, dir);
                if (!err)
                        return -EFAULT;
        }

        return nents;
}
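
/* Counterpart of qce_mapsg(): unmap entry by entry when chained. */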
void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
                 enum dma_data_direction dir, bool chained)
{
        if (chained)
                while (sg) {
                        dma_unmap_sg(dev, sg, 1, dir);
                        sg = scatterwalk_sg_next(sg);
                }
        else
                dma_unmap_sg(dev, sg, nents, dir);
}
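
/*
 * Count the entries covering @nbytes and, when @chained is non-NULL,
 * report whether the list is chained: a zero-length entry that follows
 * a non-final one is taken as a chain marker.
 */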
int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
{
        struct scatterlist *sg = sglist;
        int nents = 0;

        if (chained)
                *chained = false;

        while (nbytes > 0 && sg) {
                nents++;
                nbytes -= sg->length;
                if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
                        *chained = true;
                sg = scatterwalk_sg_next(sg);
        }

        return nents;
}
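
/*
 * Copy the entries of @new_sgl into the first unused (pageless) slots of
 * @sgt and return the last slot written, or ERR_PTR(-EINVAL) when the
 * table has no free slot left.
 */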
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
{
        struct scatterlist *sg = sgt->sgl, *sg_last = NULL;

        while (sg) {
                if (!sg_page(sg))
                        break;
                sg = sg_next(sg);
        }

        if (!sg)
                return ERR_PTR(-EINVAL);

        while (new_sgl && sg) {
                sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
                            new_sgl->offset);
                sg_last = sg;
                sg = sg_next(sg);
                new_sgl = sg_next(new_sgl);
        }

        return sg_last;
}
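
/*
 * Prepare and submit one slave-SG transfer on @chan; @cb/@cb_param are
 * attached to the descriptor before it is queued.
 */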
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
                           int nents, unsigned long flags,
                           enum dma_transfer_direction dir,
                           dma_async_tx_callback cb, void *cb_param)
{
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t cookie;

        if (!sg || !nents)
                return -EINVAL;

        desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
        if (!desc)
                return -EINVAL;

        desc->callback = cb;
        desc->callback_param = cb_param;
        cookie = dmaengine_submit(desc);

        return dma_submit_error(cookie);
}
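
/*
 * Queue both halves of an operation: rx_sg feeds data into the engine
 * (DMA_MEM_TO_DEV) and tx_sg drains the results (DMA_DEV_TO_MEM); only
 * the draining transfer carries the completion callback.
 */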
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
                     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
                     dma_async_tx_callback cb, void *cb_param)
{
        struct dma_chan *rxchan = dma->rxchan;
        struct dma_chan *txchan = dma->txchan;
        unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
        int ret;

        ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
                              NULL, NULL);
        if (ret)
                return ret;

        return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
                               cb, cb_param);
}
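
/* Kick both channels so the queued descriptors start executing. */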
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
        dma_async_issue_pending(dma->rxchan);
        dma_async_issue_pending(dma->txchan);
}
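
/*
 * Abort everything queued on both channels; a failure to terminate the
 * rx channel is reported first, otherwise the tx result is returned.
 */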
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
        int ret;

        ret = dmaengine_terminate_all(dma->rxchan);
        return ret ?: dmaengine_terminate_all(dma->txchan);
}