/*
 * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef IOATDMA_H
#define IOATDMA_H
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/dmapool.h>
#include <linux/cache.h>
#include <linux/pci_ids.h>
#include <linux/circ_buf.h>
#include <linux/interrupt.h>
#include "registers.h"
#include "hw.h"
#define IOAT_DMA_VERSION  "4.00"

#define IOAT_DMA_DCA_ANY_CPU		~0

#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, dma_dev)
#define to_dev(ioat_chan) (&(ioat_chan)->ioat_dma->pdev->dev)
#define to_pdev(ioat_chan) ((ioat_chan)->ioat_dma->pdev)

#define chan_num(ch) ((int)((ch)->reg_base - (ch)->ioat_dma->reg_base) / 0x80)
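/*
 * Channel register windows are 0x80 bytes wide within the device's MMIO
 * region, so a channel's number can be recovered from the byte offset of
 * its window relative to the device's register base.
 */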
/*
 * workaround for IOAT ver.3.0 null descriptor issue
 * (channel returns error when size is 0)
 */
#define NULL_DESC_BUFFER_SIZE 1

enum ioat_irq_mode {
	IOAT_NOIRQ = 0,
	IOAT_MSIX,
	IOAT_MSI,
	IOAT_INTX
};
/**
 * struct ioatdma_device - internal representation of an IOAT device
 * @pdev: PCI-Express device
 * @reg_base: MMIO register space base address
 * @dma_pool: for allocating DMA descriptors
 * @completion_pool: DMA buffers for completion ops
 * @sed_hw_pool: DMA super descriptor pools
 * @dma_dev: embedded struct dma_device
 * @version: version of ioatdma device
 * @msix_entries: irq handlers
 * @idx: per channel data
 * @dca: direct cache access context
 * @irq_mode: interrupt mode (INTX, MSI, MSIX)
 * @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
 * @enumerate_channels: hw version specific channel enumeration
 * @reset_hw: hw version specific channel (re)initialization
 * @cleanup_fn: select between the v2 and v3 cleanup routines
 * @timer_fn: select between the v2 and v3 timer watchdog routines
 * @self_test: hardware version specific self test for each supported op type
 *
 * Note: the v3 cleanup routine supports raid operations
 */
struct ioatdma_device {
	struct pci_dev *pdev;
	void __iomem *reg_base;
	struct pci_pool *dma_pool;
	struct pci_pool *completion_pool;
#define MAX_SED_POOLS	5
	struct dma_pool *sed_hw_pool[MAX_SED_POOLS];
	struct dma_device dma_dev;
	u8 version;
	struct msix_entry msix_entries[4];
	struct ioatdma_chan *idx[4];
	struct dca_provider *dca;
	enum ioat_irq_mode irq_mode;

	void (*intr_quirk)(struct ioatdma_device *ioat_dma);
	int (*enumerate_channels)(struct ioatdma_device *ioat_dma);
	int (*reset_hw)(struct ioatdma_chan *ioat_chan);
	void (*cleanup_fn)(unsigned long data);
	void (*timer_fn)(unsigned long data);
	int (*self_test)(struct ioatdma_device *ioat_dma);
};

struct ioatdma_chan {
	struct dma_chan dma_chan;
	void __iomem *reg_base;
	dma_addr_t last_completion;
	spinlock_t cleanup_lock;
	unsigned long state;
	#define IOAT_COMPLETION_PENDING 0
	#define IOAT_COMPLETION_ACK 1
	#define IOAT_RESET_PENDING 2
	#define IOAT_KOBJ_INIT_FAIL 3
	#define IOAT_RESHAPE_PENDING 4
	#define IOAT_RUN 5
	#define IOAT_CHAN_ACTIVE 6
	struct timer_list timer;
	#define COMPLETION_TIMEOUT msecs_to_jiffies(100)
	#define IDLE_TIMEOUT msecs_to_jiffies(2000)
	#define RESET_DELAY msecs_to_jiffies(100)
	struct ioatdma_device *ioat_dma;
	dma_addr_t completion_dma;
	u64 *completion;
	struct tasklet_struct cleanup_task;
	struct kobject kobj;
/* ioat v2 / v3 channel attributes
 * @xfercap_log: log2 of channel max transfer length (for fast division)
 * @head: allocated index
 * @issued: hardware notification point
 * @tail: cleanup index
 * @dmacount: identical to 'head' except for occasionally resetting to zero
 * @alloc_order: log2 of the number of allocated descriptors
 * @produce: number of descriptors to produce at submit time
 * @ring: software ring buffer implementation of hardware ring
 * @prep_lock: serializes descriptor preparation (producers)
 */
	size_t xfercap_log;
	u16 head;
	u16 issued;
	u16 tail;
	u16 dmacount;
	u16 alloc_order;
	u16 produce;
	struct ioat_ring_ent **ring;
	spinlock_t prep_lock;
};
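/*
 * Ring bookkeeping: 'head' advances as descriptors are prepared, 'issued'
 * marks how far the hardware has been notified, and 'tail' trails behind
 * as completed descriptors are cleaned up. All three are free-running u16
 * indices interpreted modulo the power-of-two ring size (see the CIRC_CNT
 * helpers below).
 */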
struct ioat_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct dma_chan *, char *);
};
/**
 * struct ioat_sed_ent - wrapper around super extended hardware descriptor
 * @hw: hardware SED
 * @dma: dma address for the SED
 * @parent: pointer to the parent dma descriptor
 * @hw_pool: descriptor pool index
 */
struct ioat_sed_ent {
	struct ioat_sed_raw_descriptor *hw;
	dma_addr_t dma;
	struct ioat_ring_ent *parent;
	unsigned int hw_pool;
};
/**
 * struct ioat_ring_ent - wrapper around hardware descriptor
 * @hw: hardware DMA descriptor (for memcpy)
 * @xor: hardware xor descriptor
 * @xor_ex: hardware xor extension descriptor
 * @pq: hardware pq descriptor
 * @pq_ex: hardware pq extension descriptor
 * @pqu: hardware pq update descriptor
 * @raw: hardware raw (un-typed) descriptor
 * @txd: the generic software descriptor for all engines
 * @len: total transaction length for unmap
 * @result: asynchronous result of validate operations
 * @id: identifier for debug
 * @sed: pointer to super extended descriptor sw desc
 */
struct ioat_ring_ent {
	union {
		struct ioat_dma_descriptor *hw;
		struct ioat_xor_descriptor *xor;
		struct ioat_xor_ext_descriptor *xor_ex;
		struct ioat_pq_descriptor *pq;
		struct ioat_pq_ext_descriptor *pq_ex;
		struct ioat_pq_update_descriptor *pqu;
		struct ioat_raw_descriptor *raw;
	};
	size_t len;
	struct dma_async_tx_descriptor txd;
	enum sum_check_flags *result;
#ifdef DEBUG
	int id;
#endif
	struct ioat_sed_ent *sed;
};
static inline struct ioatdma_chan *to_ioat_chan(struct dma_chan *c)
{
	return container_of(c, struct ioatdma_chan, dma_chan);
}
/* wrapper around hardware descriptor format + additional software fields */
#ifdef DEBUG
#define set_desc_id(desc, i) ((desc)->id = (i))
#define desc_id(desc) ((desc)->id)
#else
#define set_desc_id(desc, i)
#define desc_id(desc) (0)
#endif
static inline void
__dump_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_dma_descriptor *hw,
		struct dma_async_tx_descriptor *tx, int id)
{
	struct device *dev = to_dev(ioat_chan);

	dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x"
		" ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id,
		(unsigned long long) tx->phys,
		(unsigned long long) hw->next, tx->cookie, tx->flags,
		hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write);
}

#define dump_desc_dbg(c, d) \
	({ if (d) __dump_desc_dbg(c, d->hw, &d->txd, desc_id(d)); 0; })
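/*
 * dump_desc_dbg() evaluates to 0 so it can appear in expression context;
 * the descriptor id it prints is only meaningful when DEBUG is defined
 * (desc_id() reads as 0 otherwise).
 */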
static inline struct ioatdma_chan *
ioat_chan_by_index(struct ioatdma_device *ioat_dma, int index)
{
	return ioat_dma->idx[index];
}
static inline u64 ioat_chansts_32(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;
	u32 status_lo;

	/* We need to read the low address first as this causes the
	 * chipset to latch the upper bits for the subsequent read
	 */
	status_lo = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver));
	status = readl(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver));
	status <<= 32;
	status |= status_lo;

	return status;
}
#if BITS_PER_LONG == 64

static inline u64 ioat_chansts(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u64 status;

	/* With IOAT v3.3 the status register is 64bit. */
	if (ver >= IOAT_VER_3_3)
		status = readq(ioat_chan->reg_base + IOAT_CHANSTS_OFFSET(ver));
	else
		status = ioat_chansts_32(ioat_chan);

	return status;
}

#else
#define ioat_chansts ioat_chansts_32
#endif
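/*
 * On 32-bit kernels readq() is not generally available, so the split
 * two-readl() path is used unconditionally there.
 */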
static inline u64 ioat_chansts_to_addr(u64 status)
{
	return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR;
}
static inline u32 ioat_chanerr(struct ioatdma_chan *ioat_chan)
{
	return readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
}
static inline void ioat_suspend(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_SUSPEND,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline void ioat_reset(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;

	writeb(IOAT_CHANCMD_RESET,
	       ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
}
static inline bool ioat_reset_pending(struct ioatdma_chan *ioat_chan)
{
	u8 ver = ioat_chan->ioat_dma->version;
	u8 cmd;

	cmd = readb(ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ver));
	return (cmd & IOAT_CHANCMD_RESET) == IOAT_CHANCMD_RESET;
}
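/*
 * The hardware clears IOAT_CHANCMD_RESET once the reset completes, so a
 * set bit read back here means a previously requested reset is still in
 * progress.
 */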
static inline bool is_ioat_active(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE);
}

static inline bool is_ioat_idle(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE);
}

static inline bool is_ioat_halted(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED);
}

static inline bool is_ioat_suspended(unsigned long status)
{
	return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED);
}
/* channel was fatally programmed */
static inline bool is_ioat_bug(unsigned long err)
{
	return !!err;
}
#define IOAT_MAX_ORDER 16
#define ioat_get_alloc_order() \
	(min(ioat_ring_alloc_order, IOAT_MAX_ORDER))
#define ioat_get_max_alloc_order() \
	(min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER))
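/*
 * Ring sizes are powers of two: an order of N allocates 2^N descriptors,
 * capped at 2^IOAT_MAX_ORDER (65536) entries per channel.
 */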
static inline u32 ioat_ring_size(struct ioatdma_chan *ioat_chan)
{
	return 1 << ioat_chan->alloc_order;
}
/* count of descriptors in flight with the engine */
static inline u16 ioat_ring_active(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->tail,
			ioat_ring_size(ioat_chan));
}

/* count of descriptors pending submission to hardware */
static inline u16 ioat_ring_pending(struct ioatdma_chan *ioat_chan)
{
	return CIRC_CNT(ioat_chan->head, ioat_chan->issued,
			ioat_ring_size(ioat_chan));
}

static inline u32 ioat_ring_space(struct ioatdma_chan *ioat_chan)
{
	return ioat_ring_size(ioat_chan) - ioat_ring_active(ioat_chan);
}
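/*
 * Worked example: with a ring of size 8, head = 5 and tail = 2 give
 * CIRC_CNT() = (5 - 2) & 7 = 3 active descriptors and 5 slots free; the
 * masking also handles index wrap-around, e.g. head = 1, tail = 6 yields
 * (1 - 6) & 7 = 3.
 */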
static inline u16
ioat_xferlen_to_descs(struct ioatdma_chan *ioat_chan, size_t len)
{
	u16 num_descs = len >> ioat_chan->xfercap_log;

	num_descs += !!(len & ((1 << ioat_chan->xfercap_log) - 1));
	return num_descs;
}
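/*
 * E.g. with xfercap_log = 20 (1 MiB max per descriptor), a 2 MiB + 1 byte
 * transfer needs (len >> 20) = 2 full descriptors plus one more for the
 * remainder, i.e. 3 descriptors.
 */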
static inline struct ioat_ring_ent *
ioat_get_ring_ent(struct ioatdma_chan *ioat_chan, u16 idx)
{
	return ioat_chan->ring[idx & (ioat_ring_size(ioat_chan) - 1)];
}
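/*
 * Because the ring size is a power of two, masking with (size - 1)
 * replaces a modulo operation, so callers may pass free-running indices.
 */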
static inline void
ioat_set_chainaddr(struct ioatdma_chan *ioat_chan, u64 addr)
{
	writel(addr & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW);
	writel(addr >> 32,
	       ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH);
}
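/*
 * The 64-bit descriptor chain address is programmed as two 32-bit MMIO
 * writes (low dword, then high); callers are expected to do this only
 * while the channel is quiesced, since the update is not atomic.
 */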
int ioat_probe(struct ioatdma_device *ioat_dma);
int ioat_register(struct ioatdma_device *ioat_dma);
int ioat_dma_self_test(struct ioatdma_device *ioat_dma);
void ioat_dma_remove(struct ioatdma_device *ioat_dma);
struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase);
void ioat_init_channel(struct ioatdma_device *ioat_dma,
		       struct ioatdma_chan *ioat_chan, int idx);
enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
				   struct dma_tx_state *txstate);
bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
			   dma_addr_t *phys_complete);
void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *ioat_dma);
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma);
void ioat_stop(struct ioatdma_chan *ioat_chan);
int ioat_dma_probe(struct ioatdma_device *ioat_dma, int dca);
int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca);
struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase);
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs);
int ioat_enumerate_channels(struct ioatdma_device *ioat_dma);
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags);
void ioat_issue_pending(struct dma_chan *chan);
int ioat_alloc_chan_resources(struct dma_chan *c);
void ioat_free_chan_resources(struct dma_chan *c);
void __ioat_restart_chan(struct ioatdma_chan *ioat_chan);
bool reshape_ring(struct ioatdma_chan *ioat, int order);
void __ioat_issue_pending(struct ioatdma_chan *ioat_chan);
void ioat_timer_event(unsigned long data);
int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo);
int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
extern int ioat_pending_level;
extern int ioat_ring_alloc_order;
extern int ioat_ring_max_alloc_order;
extern struct kobj_type ioat_ktype;
extern struct kmem_cache *ioat_cache;
#endif /* IOATDMA_H */