/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"

#define IOAT_DMA_VERSION "4.00"

#define IOAT_DMA_DCA_ANY_CPU ~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
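
/*
 * chan_num() works because each channel's registers sit in a 0x80-byte
 * bank above the device's reg_base, so the byte distance between the two
 * bases divided by 0x80 recovers the channel index.
 */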

/* ioat hardware assumes at least two sources for raid operations */
#define src_cnt_to_sw(x) ((x) + 2)
#define src_cnt_to_hw(x) ((x) - 2)
#define ndest_to_sw(x) ((x) + 1)
#define ndest_to_hw(x) ((x) - 1)
#define src16_cnt_to_sw(x) ((x) + 9)
#define src16_cnt_to_hw(x) ((x) - 9)
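
/*
 * Example: a 4-source XOR is encoded in the descriptor as
 * src_cnt_to_hw(4) == 2 and decoded with src_cnt_to_sw(2) == 4; the
 * 16-source PQ variants bias by 9 instead of 2.
 */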

/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};

/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @cap: read DMA capabilities register
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS 5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioatdma_chan *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;
	u32 cap;
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;

/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};

struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};

/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED descriptor
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};

/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
	#ifdef DEBUG
	int id;
	#endif
	struct ioat_sed_ent *sed;
};

extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
extern int ioat_ring_max_alloc_order;
extern struct kmem_cache *ioat_sed_cache;

static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}

/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif

static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}
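
/*
 * Statement-expression wrapper so the call site can use dump_desc_dbg()
 * where an expression is expected: it is a no-op for a NULL descriptor
 * and always evaluates to 0.
 */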
#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })

static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}

static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}

#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;

	/* With IOAT v3.3 the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(ioat_chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif

static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}

static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}

static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}

static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}

static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
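
/*
 * A typical consumer snapshots the status once and tests it, e.g. (a
 * sketch, where handle_error() stands in for the driver's error path):
 *
 *	u64 status = ioat_chansts(ioat_chan);
 *
 *	if (is_ioat_halted(status))
 *		handle_error(ioat_chanerr(ioat_chan));
 */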

/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}

#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))

static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}

/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
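
/*
 * Ring accounting example: with alloc_order == 3 (an 8-entry ring),
 * head == 5, issued == 3 and tail == 1, two descriptors are prepped but
 * not yet submitted (pending), four are in flight with the engine
 * (active), and four slots are free (space).
 */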

static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
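
/*
 * This is a ceiling division by the per-descriptor transfer cap. For
 * example, if hardware reported xfercap_log == 20 (a 1MB cap), a 2.5MB
 * transfer would take three descriptors: two full-size plus one for the
 * remainder.
 */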

static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}

static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
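
/*
 * The chain address register pair is programmed as two 32-bit halves:
 * the masked low word first, then the shifted high word.
 */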

/* IOAT Prep functions */
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
	      unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src,
		  unsigned int src_cnt, size_t len,
		  enum sum_check_flags *result, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
	     unsigned int src_cnt, const unsigned char *scf, size_t len,
	     unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
		 unsigned int src_cnt, const unsigned char *scf, size_t len,
		 enum sum_check_flags *pqres, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
struct dma_async_tx_descriptor *
ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
		    unsigned int src_cnt, size_t len,
		    enum sum_check_flags *result, unsigned long flags);
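
/*
 * These prep routines are wired up as the dma_device callbacks; clients
 * reach them through the generic dmaengine API rather than calling them
 * directly. A minimal memcpy sequence might look like:
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 */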

/* IOAT Operation functions */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data);
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data);
struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags);
void ioat_start_null_desc(struct ioatdma_chan *ioat_chan);
void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan);
int ioat_reset_hw(struct ioatdma_chan *ioat_chan);
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate);
void ioat_cleanup_event(unsigned long data);
void ioat_timer_event(unsigned long data);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
void ioat_issue_pending(struct dma_chan *chan);

/* IOAT Init functions */
bool is_bwd_ioat(struct pci_dev *pdev);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);

#endif /* IOATDMA_H */