/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"
#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
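
/*
 * Example, derived from chan_num() above: channel register banks are laid
 * out 0x80 bytes apart, so a channel whose reg_base sits 0x180 bytes past
 * the device's reg_base is channel 3.
 */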
/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
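
/*
 * Example: the hardware minimum of two sources is encoded as
 * src_cnt_to_hw(2) == 0 in the descriptor, and src_cnt_to_sw() undoes the
 * bias when decoding.  The src16 variants apply the same idea to the
 * extended (up to 16 source) pq descriptors, which use a bias of 9.
 */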
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1
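
/*
 * Interrupt mode used by ioat_dma_setup_interrupts() and stored in struct
 * ioatdma_device below.  The enumerator list is a reconstruction based on
 * the driver's MSI-X/MSI/INTX setup paths.
 */
enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};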
/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: for allocating completion status writeback areas
 * @sed_hw_pool: per-size pools of super extended descriptors
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSI-X)
 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
 * @enumerate_channels: hw version specific channel enumeration
 * @reset_hw: hw version specific channel (re)initialization
 * @cleanup_fn: select between the v2 and v3 cleanup routines
 * @timer_fn: select between the v2 and v3 timer watchdog routines
 * @self_test: hardware version specific self test for each supported op type
 *
 * Note: the v3 cleanup routine supports raid operations
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioatdma_chan *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	void (*intr_quirk)(struct ioatdma_device *ioat_dma);
	int (*enumerate_channels)(struct ioatdma_device *ioat_dma);
	int (*reset_hw)(struct ioatdma_chan *ioat_chan);
	void (*cleanup_fn)(unsigned long data);
	void (*timer_fn)(unsigned long data);
	int (*self_test)(struct ioatdma_device *ioat_dma);
};
struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};
struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};
/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: point to the dma descriptor that's the parent
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};
/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}
/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif
static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}
#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}
static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}
#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;

	/* With IOAT v3.3 the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(ioat_chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif
static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
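
/*
 * The CHANSTS register holds both the completed-descriptor address and the
 * low-order channel status bits; the mask above extracts the address, while
 * the is_ioat_*() helpers below test the status field.
 */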
static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}
static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}
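
/*
 * Example: alloc_order == 4 gives a ring of 1 << 4 == 16 descriptors;
 * IOAT_MAX_ORDER caps the ring at 1 << 16 entries.
 */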
/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}
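
/*
 * CIRC_CNT() computes (head - index) modulo the power-of-two ring size.
 * Example: with a 16-entry ring, head == 21 and tail == 18 means three
 * descriptors are still in flight; the same arithmetic against 'issued'
 * yields the count prepared but not yet handed to hardware.
 */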
static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
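
/*
 * This is a ceiling division by the channel's maximum transfer size.
 * Example: if xfercap_log were 20 (1 MiB per descriptor), a 2 MiB + 1 byte
 * transfer needs 3 descriptors: 2 from the shift plus 1 for the remainder.
 */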
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}
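
/*
 * The ring size is always a power of two, so masking with (size - 1) is a
 * cheap modulo: index 17 in a 16-entry ring maps to slot 1.
 */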
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
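
/*
 * The 64-bit descriptor chain address is programmed as two 32-bit MMIO
 * writes: the low word first, then the high word.
 */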
/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);
/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
#endif /* IOATDMA_H */