/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)

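/*
 * Each channel owns a 0x80-byte window of the device's MMIO register
 * space, so the byte distance between a channel's register base and the
 * device register base, divided by 0x80, recovers the channel index.
 */
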
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)

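/*
 * The hardware count fields are biased by the implied minimum: e.g.
 * src_cnt_to_hw(5) == 3 encodes a five-source operation, and the
 * extended 16-source descriptor format biases its count by nine.
 */
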
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of a IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioatdma_chan *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: point to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

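/*
 * The union above provides typed views onto a single hardware descriptor
 * allocation; which member is meaningful depends on the operation the
 * ring entry was prepared for.
 */
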
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

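/*
 * dump_desc_dbg() is NULL-safe and, being a statement expression that
 * evaluates to 0, can be used where an expression is expected; on
 * non-DEBUG builds desc_id() compiles to 0 so the id argument is free.
 */
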
static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}

#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;

	/* With IOAT v3.3 the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(ioat_chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif

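/*
 * On 32-bit kernels there is no generally usable atomic 64-bit MMIO
 * read, so the split low/high read path is used unconditionally there.
 */
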
static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	/* the ring always holds a power-of-two number of descriptors */
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}

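/*
 * Ring index lifecycle: 'head' advances as descriptors are prepared,
 * 'issued' catches up to 'head' when work is handed to hardware, and
 * 'tail' chases both as completions are cleaned up; the CIRC_CNT()
 * helpers above derive the in-flight and pending counts from those
 * indices.
 */
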
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}

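/*
 * For example, if xfercap_log were 20 (a 1 MB per-descriptor limit,
 * purely illustrative), a 2.5 MB transfer would shift down to 2 full
 * descriptors plus 1 for the remainder: ioat_xferlen_to_descs()
 * returns 3.
 */
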
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	/* ring size is a power of two, so masking by size - 1 wraps idx */
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	/* the 64-bit chain address is programmed as two 32-bit halves */
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);

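/*
 * Note: clients reach these prep routines through the generic dmaengine
 * API rather than calling them directly. A minimal sketch, assuming a
 * channel already acquired via dma_request_channel() and DMA-mapped
 * src/dst addresses:
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */
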
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);

#endif /* IOATDMA_H */