/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
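
/*
 * Editor's note: each DMA context exposes a small register window; the
 * macros above address ContextControlSet (+0), ContextControlClear (+4),
 * CommandPtr (+12) and ContextMatch (+16) relative to the context base,
 * e.g. CONTROL_SET(OHCI1394_AsReqTrContextBase) for the AT request context.
 */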

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
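
/*
 * Editor's worked example (assuming 4 KiB pages): AR_BUFFERS_MIN is
 * 32768 / 4096 = 8, and MAX_AR_PACKET_SIZE is 16 + 4096 + 4 = 4116 bytes,
 * so a maximal packet can straddle DIV_ROUND_UP(4116, 4096) = 2 pages.
 * That is why ar_context_init() maps AR_WRAPAROUND_PAGES extra copies of
 * the first ring pages behind the ring.
 */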

struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) <<  0)
#define IT_HEADER_TCODE(v)       ((v) <<  4)
#define IT_HEADER_CHANNEL(v)     ((v) <<  8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
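
/*
 * Editor's sketch (not part of the original driver): how the IT_HEADER_*
 * fields combine into the first quadlet of an isochronous transmit header.
 * The channel, tag and sy values below are arbitrary examples; speed 2 is
 * S400 per the speed[] table further down.  IT_HEADER_DATA_LENGTH() goes
 * into the second quadlet, which is why it shares shift 16 with SPEED.
 */
static inline u32 it_header_example(void)
{
	return IT_HEADER_SY(0) | IT_HEADER_TCODE(TCODE_STREAM_DATA) |
	       IT_HEADER_CHANNEL(5) | IT_HEADER_TAG(1) | IT_HEADER_SPEED(2);
}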

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;

	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool is_root;
	bool csr_state_setclear_abdicate;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id_cpu;
	dma_addr_t self_id_bus;
	struct tasklet_struct bus_reset_tasklet;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd

#define QUIRK_CYCLE_TIMER		1
#define QUIRK_RESET_PACKET		2
#define QUIRK_BE_HEADERS		4
#define QUIRK_NO_1394A			8
#define QUIRK_NO_MSI			16

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete ? " selfID" : "",
	    evt & OHCI1394_RQPkt ? " AR_req" : "",
	    evt & OHCI1394_RSPkt ? " AR_resp" : "",
	    evt & OHCI1394_reqTxComplete ? " AT_req" : "",
	    evt & OHCI1394_respTxComplete ? " AT_resp" : "",
	    evt & OHCI1394_isochRx ? " IR" : "",
	    evt & OHCI1394_isochTx ? " IT" : "",
	    evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
	    evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
	    evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
	    evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
	    evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
	    evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
	    evt & OHCI1394_busReset ? " busReset" : "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?" : "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};

static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		fw_notify("A%c %s, PHY %08x %08x\n",
		    dir, evts[evt], header[1], header[2]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}

#else

#define param_debug 0
static inline void log_irqs(u32 evt) {}
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to read phy reg\n");

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to write phy reg\n");

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
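
/*
 * Editor's note: all PHY register accesses from fw_card callbacks are
 * serialized by phy_reg_mutex, so a read-modify-write such as the
 * ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER) call
 * in ohci_enable() cannot interleave with a concurrent PHY access.
 */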

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	if (ctx->buffer)
		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ctx->ohci);

		fw_error("AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}
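
/*
 * Editor's worked example: with AR_BUFFERS = 8 and last_buffer_index = 3,
 * ar_first_buffer_index() yields 4, ar_prev_buffer_index(0) yields 7, and
 * ar_next_buffer_index(7) wraps back around to 0.
 */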

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		set_page_private(ctx->pages[i], dma_addr);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i]              = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
				 -1, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;

			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.
	 */

	tcode = (packet->header[0] >> 4) & 0x0f;
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case TCODE_LINK_INTERNAL:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus	= payload_bus;
			packet->payload_mapped	= true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running) {
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
		flush_writes(ohci);
	} else
		context_run(ctx, 0);

	return 0;
}
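
/*
 * Editor's note on the descriptor layout above: four descriptors are
 * reserved per packet.  Without a payload, z = 2 and d[0..1] carry the
 * immediate header; with a payload, z = 3 and d[2] points at the mapped
 * or inline data.  d[3] is never seen by the controller and is reused as
 * scratch space for struct driver_data.
 */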

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; this should give the same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		/* fall through */

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
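
/*
 * Editor's sketch (illustrative only): extracting the 48-bit destination
 * offset from a request header with the accessors above, mirroring what
 * handle_local_request() does below.
 */
static inline u64 header_get_offset_example(const struct fw_packet *packet)
{
	return ((u64) HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
	       packet->header[2];
}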

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	fw_error("swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD) {
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
		fw_error("DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
#else
		fw_error("DMA context %s has stopped, error code: %#x\n",
			 name, ctl & 0x1f);
#endif
	}
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks  = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
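
/*
 * Editor's sketch of the inverse conversion (not part of the original
 * driver): the cycle timer register packs 7 bits of seconds [31:25],
 * 13 bits of cycleCount [24:12] (0..7999) and 12 bits of cycleOffset
 * [11:0] (0..3071 ticks of the 24.576 MHz clock).
 */
static inline u32 ticks_to_cycle_timer(u32 ticks)
{
	u32 offset  = ticks % 3072;
	u32 count   = (ticks / 3072) % 8000;
	u32 seconds = ticks / (3072 * 8000);

	return (seconds << 25) | (count << 12) | offset;
}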

/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}
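
/*
 * Editor's worked example: if bus_time is 0x80 (bit 6 clear) and the cycle
 * timer's seconds field has meanwhile advanced into 0x40..0x7f (bit 6 set),
 * the comparison above fires once, bus_time becomes 0xc0, and the return
 * value splices the live 7-bit seconds into the extended count.
 */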

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_notify("node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		fw_notify("malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;
	if (self_id_count == 0 || self_id_count > 252) {
		fw_notify("inconsistent self IDs\n");
		return;
	}
	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
			fw_notify("inconsistent self IDs\n");
			return;
		}
		ohci->self_id_buffer[j] =
				cond_le32_to_cpu(ohci->self_id_cpu[i]);
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = -1; /* prevent AT packet queueing */
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);

	spin_unlock_irqrestore(&ohci->lock, flags);

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->quirks & QUIRK_RESET_PACKET)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */

	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irqrestore(&ohci->lock, flags);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci->node_id, generation,
		    self_id_count, ohci->self_id_buffer);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/*
	 * busReset and postedWriteErr must not be cleared yet
	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
	 */
	reg_write(ohci, OHCI1394_IntEventClear,
		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
	log_irqs(event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	if (event & OHCI1394_isochRx) {
		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->ir_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (event & OHCI1394_isochTx) {
		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->it_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		fw_error("Register access failure - "
			 "please notify linux1394-devel@lists.sf.net\n");

	if (unlikely(event & OHCI1394_postedWriteErr)) {
		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
		reg_write(ohci, OHCI1394_IntEventClear,
			  OHCI1394_postedWriteErr);
		fw_error("PCI posted write error\n");
	}

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			fw_notify("isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work.  In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later.  (FIXME?)
		 */
		if (printk_ratelimit())
			fw_notify("isochronous cycle inconsistent\n");
	}

	if (unlikely(event & OHCI1394_unrecoverableError))
		handle_dead_contexts(ohci);

	if (event & OHCI1394_cycle64Seconds) {
		spin_lock(&ohci->lock);
		update_bus_time(ohci);
		spin_unlock(&ohci->lock);
	} else
		flush_writes(ohci);

	return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}

static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY and link consistently. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}
2037 static int ohci_enable(struct fw_card *card,
2038 const __be32 *config_rom, size_t length)
2040 struct fw_ohci *ohci = fw_ohci(card);
2041 struct pci_dev *dev = to_pci_dev(card->device);
2042 u32 lps, seconds, version, irqs;
2045 if (software_reset(ohci)) {
2046 fw_error("Failed to reset ohci card.\n");
2051 * Now enable LPS, which we need in order to start accessing
2052 * most of the registers. In fact, on some cards (ALI M5251),
2053 * accessing registers in the SClk domain without LPS enabled
2054 * will lock up the machine. Wait 50msec to make sure we have
2055 * full link enabled. However, with some cards (well, at least
2056 * a JMicron PCIe card), we have to try again sometimes.
2058 reg_write(ohci, OHCI1394_HCControlSet,
2059 OHCI1394_HCControl_LPS |
2060 OHCI1394_HCControl_postedWriteEnable);
2063 for (lps = 0, i = 0; !lps && i < 3; i++) {
2065 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2066 OHCI1394_HCControl_LPS;
2070 fw_error("Failed to set Link Power Status\n");
2074 reg_write(ohci, OHCI1394_HCControlClear,
2075 OHCI1394_HCControl_noByteSwapData);
2077 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2078 reg_write(ohci, OHCI1394_LinkControlSet,
2079 OHCI1394_LinkControl_cycleTimerEnable |
2080 OHCI1394_LinkControl_cycleMaster);
2082 reg_write(ohci, OHCI1394_ATRetries,
2083 OHCI1394_MAX_AT_REQ_RETRIES |
2084 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2085 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2088 seconds = lower_32_bits(get_seconds());
2089 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
2090 ohci->bus_time = seconds & ~0x3f;
2092 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2093 if (version >= OHCI_VERSION_1_1) {
2094 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2096 card->broadcast_channel_auto_allocated = true;
2099 /* Get implemented bits of the priority arbitration request counter. */
2100 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2101 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2102 reg_write(ohci, OHCI1394_FairnessControl, 0);
2103 card->priority_budget_implemented = ohci->pri_req_max != 0;
2105 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
2106 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2107 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2109 ret = configure_1394a_enhancements(ohci);
2113 /* Activate link_on bit and contender bit in our self ID packets.*/
2114 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);

        /*
         * When the link is not yet enabled, the atomic config rom
         * update mechanism described below in ohci_set_config_rom()
         * is not active.  We have to update ConfigRomHeader and
         * BusOptions manually, and the write to ConfigROMmap takes
         * effect immediately.  We tie this to the enabling of the
         * link, so we have a valid config rom before enabling - the
         * OHCI requires that ConfigROMhdr and BusOptions have valid
         * values before enabling.
         *
         * However, when the ConfigROMmap is written, some controllers
         * always read back quadlets 0 and 2 from the config rom to
         * the ConfigRomHeader and BusOptions registers on bus reset.
         * They shouldn't do that in this initial case where the link
         * isn't enabled.  This means we have to use the same
         * workaround here, setting the bus header to 0 and then write
         * the right values in the bus reset tasklet.
         */

        if (config_rom) {
                ohci->next_config_rom =
                        dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                           &ohci->next_config_rom_bus,
                                           GFP_KERNEL);
                if (ohci->next_config_rom == NULL)
                        return -ENOMEM;

                copy_config_rom(ohci->next_config_rom, config_rom, length);
        } else {
                /*
                 * In the suspend case, config_rom is NULL, which
                 * means that we just reuse the old config rom.
                 */
                ohci->next_config_rom = ohci->config_rom;
                ohci->next_config_rom_bus = ohci->config_rom_bus;
        }

        ohci->next_header = ohci->next_config_rom[0];
        ohci->next_config_rom[0] = 0;
        reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
        reg_write(ohci, OHCI1394_BusOptions,
                  be32_to_cpu(ohci->next_config_rom[2]));
        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

        reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

        if (!(ohci->quirks & QUIRK_NO_MSI))
                pci_enable_msi(dev);
        if (request_irq(dev->irq, irq_handler,
                        pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
                        ohci_driver_name, ohci)) {
                fw_error("Failed to allocate interrupt %d.\n", dev->irq);
                pci_disable_msi(dev);
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
                return -EIO;
        }

        irqs =  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                OHCI1394_RQPkt | OHCI1394_RSPkt |
                OHCI1394_isochTx | OHCI1394_isochRx |
                OHCI1394_postedWriteErr |
                OHCI1394_selfIDComplete |
                OHCI1394_regAccessFail |
                OHCI1394_cycle64Seconds |
                OHCI1394_cycleInconsistent |
                OHCI1394_unrecoverableError |
                OHCI1394_cycleTooLong |
                OHCI1394_masterIntEnable;
        if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
                irqs |= OHCI1394_busReset;
        reg_write(ohci, OHCI1394_IntMaskSet, irqs);

        reg_write(ohci, OHCI1394_HCControlSet,
                  OHCI1394_HCControl_linkEnable |
                  OHCI1394_HCControl_BIBimageValid);

        reg_write(ohci, OHCI1394_LinkControlSet,
                  OHCI1394_LinkControl_rcvSelfID |
                  OHCI1394_LinkControl_rcvPhyPkt);

        ar_context_run(&ohci->ar_request_ctx);
        ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */

        /* We are ready to go, reset bus to finish initialization. */
        fw_schedule_bus_reset(&ohci->card, false, true);

        return 0;
}
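
/*
 * Note that ohci_enable() unmasks interrupts and starts the AR contexts
 * before it schedules the bus reset above, so the selfIDComplete event
 * raised by that reset is caught by the interrupt handler and the
 * deferred config rom update (see ohci_set_config_rom() below) can
 * complete in the bus reset tasklet.
 */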

static int ohci_set_config_rom(struct fw_card *card,
                               const __be32 *config_rom, size_t length)
{
        struct fw_ohci *ohci;
        unsigned long flags;
        __be32 *next_config_rom;
        dma_addr_t uninitialized_var(next_config_rom_bus);

        ohci = fw_ohci(card);

        /*
         * When the OHCI controller is enabled, the config rom update
         * mechanism is a bit tricky, but easy enough to use.  See
         * section 5.5.6 in the OHCI specification.
         *
         * The OHCI controller caches the new config rom address in a
         * shadow register (ConfigROMmapNext) and needs a bus reset
         * for the changes to take place.  When the bus reset is
         * detected, the controller loads the new values for the
         * ConfigRomHeader and BusOptions registers from the specified
         * config rom and loads ConfigROMmap from the ConfigROMmapNext
         * shadow register.  All automatically and atomically.
         *
         * Now, there's a twist to this story.  The automatic load of
         * ConfigRomHeader and BusOptions doesn't honor the
         * noByteSwapData bit, so with a be32 config rom, the
         * controller will load be32 values in to these registers
         * during the atomic update, even on little endian
         * architectures.  The workaround we use is to put a 0 in the
         * header quadlet; 0 is endian agnostic and means that the
         * config rom isn't ready yet.  In the bus reset tasklet we
         * then set up the real values for the two registers.
         *
         * We use ohci->lock to avoid racing with the code that sets
         * ohci->next_config_rom to NULL (see bus_reset_tasklet).
         */

        next_config_rom =
                dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                   &next_config_rom_bus, GFP_KERNEL);
        if (next_config_rom == NULL)
                return -ENOMEM;

        spin_lock_irqsave(&ohci->lock, flags);

        /*
         * If there is not an already pending config_rom update,
         * push our new allocation into the ohci->next_config_rom
         * and then mark the local variable as null so that we
         * won't deallocate the new buffer.
         *
         * OTOH, if there is a pending config_rom update, just
         * use that buffer with the new config_rom data, and
         * let this routine free the unused DMA allocation.
         */

        if (ohci->next_config_rom == NULL) {
                ohci->next_config_rom = next_config_rom;
                ohci->next_config_rom_bus = next_config_rom_bus;
                next_config_rom = NULL;
        }

        copy_config_rom(ohci->next_config_rom, config_rom, length);

        ohci->next_header = config_rom[0];
        ohci->next_config_rom[0] = 0;

        reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

        spin_unlock_irqrestore(&ohci->lock, flags);

        /* If we didn't use the DMA allocation, delete it. */
        if (next_config_rom != NULL)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  next_config_rom, next_config_rom_bus);

        /*
         * Now initiate a bus reset to have the changes take
         * effect.  We clean up the old config rom memory and DMA
         * mappings in the bus reset tasklet, since the OHCI
         * controller could need to access it before the bus reset
         * takes effect.
         */

        fw_schedule_bus_reset(&ohci->card, true, true);

        return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);

        at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct context *ctx = &ohci->at_request_ctx;
        struct driver_data *driver_data = packet->driver_data;
        int ret = -ENOENT;

        tasklet_disable(&ctx->tasklet);

        if (packet->ack != 0)
                goto out;

        if (packet->payload_mapped)
                dma_unmap_single(ohci->card.device, packet->payload_bus,
                                 packet->payload_length, DMA_TO_DEVICE);

        log_ar_at_event('T', packet->speed, packet->header, 0x20);
        driver_data->packet = NULL;
        packet->ack = RCODE_CANCELLED;
        packet->callback(packet, &ohci->card, packet->ack);
        ret = 0;
 out:
        tasklet_enable(&ctx->tasklet);

        return ret;
}

static int ohci_enable_phys_dma(struct fw_card *card,
                                int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
        return 0;
#else
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;
        int n, ret = 0;

        /*
         * FIXME:  Make sure this bitmask is cleared when we clear the busReset
         * interrupt bit.  Clear physReqResourceAllBuses on bus reset.
         */

        spin_lock_irqsave(&ohci->lock, flags);

        if (ohci->generation != generation) {
                ret = -ESTALE;
                goto out;
        }

        /*
         * Note, if the node ID contains a non-local bus ID, physical DMA is
         * enabled for _all_ nodes on remote buses.
         */

        n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
        if (n < 32)
                reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
        else
                reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

        flush_writes(ohci);
 out:
        spin_unlock_irqrestore(&ohci->lock, flags);

        return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}

static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;
        u32 value;

        switch (csr_offset) {
        case CSR_STATE_CLEAR:
        case CSR_STATE_SET:
                if (ohci->is_root &&
                    (reg_read(ohci, OHCI1394_LinkControlSet) &
                     OHCI1394_LinkControl_cycleMaster))
                        value = CSR_STATE_BIT_CMSTR;
                else
                        value = 0;
                if (ohci->csr_state_setclear_abdicate)
                        value |= CSR_STATE_BIT_ABDICATE;

                return value;

        case CSR_NODE_IDS:
                return reg_read(ohci, OHCI1394_NodeID) << 16;

        case CSR_CYCLE_TIME:
                return get_cycle_time(ohci);

        case CSR_BUS_TIME:
                /*
                 * We might be called just after the cycle timer has wrapped
                 * around but just before the cycle64Seconds handler, so we
                 * better check here, too, if the bus time needs to be updated.
                 */
                spin_lock_irqsave(&ohci->lock, flags);
                value = update_bus_time(ohci);
                spin_unlock_irqrestore(&ohci->lock, flags);
                return value;

        case CSR_BUSY_TIMEOUT:
                value = reg_read(ohci, OHCI1394_ATRetries);
                return (value >> 4) & 0x0ffff00f;
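                /*
                 * Worked example for the conversion above: with ATRetries
                 * holding 0x00c80022 (maxATRespRetries = 2, the cycle
                 * limit in the upper half), (value >> 4) & 0x0ffff00f
                 * yields 0x000c8002, i.e. the retry count lands in the
                 * low nibble of the CSR BUSY_TIMEOUT value and the limit
                 * fields are shifted down by four bits.
                 */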

        case CSR_PRIORITY_BUDGET:
                return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
                        (ohci->pri_req_max << 8);

        default:
                WARN_ON(1);
                return 0;
        }
}

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
        struct fw_ohci *ohci = fw_ohci(card);
        unsigned long flags;

        switch (csr_offset) {
        case CSR_STATE_CLEAR:
                if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
                        reg_write(ohci, OHCI1394_LinkControlClear,
                                  OHCI1394_LinkControl_cycleMaster);
                        flush_writes(ohci);
                }
                if (value & CSR_STATE_BIT_ABDICATE)
                        ohci->csr_state_setclear_abdicate = false;
                break;

        case CSR_STATE_SET:
                if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
                        reg_write(ohci, OHCI1394_LinkControlSet,
                                  OHCI1394_LinkControl_cycleMaster);
                        flush_writes(ohci);
                }
                if (value & CSR_STATE_BIT_ABDICATE)
                        ohci->csr_state_setclear_abdicate = true;
                break;

        case CSR_NODE_IDS:
                reg_write(ohci, OHCI1394_NodeID, value >> 16);
                flush_writes(ohci);
                break;

        case CSR_CYCLE_TIME:
                reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
                reg_write(ohci, OHCI1394_IntEventSet,
                          OHCI1394_cycleInconsistent);
                flush_writes(ohci);
                break;

        case CSR_BUS_TIME:
                spin_lock_irqsave(&ohci->lock, flags);
                ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
                spin_unlock_irqrestore(&ohci->lock, flags);
                break;

        case CSR_BUSY_TIMEOUT:
                value = (value & 0xf) | ((value & 0xf) << 4) |
                        ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
                reg_write(ohci, OHCI1394_ATRetries, value);
                flush_writes(ohci);
                break;
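                /*
                 * Worked example for the packing above: a CSR value of
                 * 0x000c8002 becomes 0x2 | (0x2 << 4) | (0x2 << 8) |
                 * (0x000c8000 << 4) = 0x00c80222, i.e. the single CSR
                 * retry_limit nibble is replicated into all three of the
                 * controller's retry fields and the limit field moves
                 * back up by four bits, inverting the CSR_BUSY_TIMEOUT
                 * read case above.
                 */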

        case CSR_PRIORITY_BUDGET:
                reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
                flush_writes(ohci);
                break;

        default:
                WARN_ON(1);
                break;
        }
}

static void copy_iso_headers(struct iso_context *ctx, void *p)
{
        int i = ctx->header_length;

        if (i + ctx->base.header_size > PAGE_SIZE)
                return;

        /*
         * The iso header is byteswapped to little endian by
         * the controller, but the remaining header quadlets
         * are big endian.  We want to present all the headers
         * as big endian, so we have to swap the first quadlet.
         */
        if (ctx->base.header_size > 0)
                *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
        if (ctx->base.header_size > 4)
                *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
        if (ctx->base.header_size > 8)
                memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
        ctx->header_length += ctx->base.header_size;
}
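
/*
 * Note on copy_iso_headers(): __swab32() rather than le32_to_cpu() is
 * the point - the swapped quadlets are stored little endian in the
 * buffer on any host, so an unconditional byte swap yields big-endian
 * data on big- and little-endian machines alike, matching the byte
 * order of the quadlets copied verbatim from offset 8 onward.
 */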

static int handle_ir_packet_per_buffer(struct context *context,
                                       struct descriptor *d,
                                       struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        struct descriptor *pd;
        __le32 *ir_header;
        void *p;

        for (pd = d; pd <= last; pd++)
                if (pd->transfer_status)
                        break;
        if (pd > last)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;

        p = last + 1;
        copy_iso_headers(ctx, p);

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ir_header = (__le32 *) p;
                ctx->base.callback.sc(&ctx->base,
                                      le32_to_cpu(ir_header[0]) & 0xffff,
                                      ctx->header_length, ctx->header,
                                      ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}

/* d == last because each descriptor block is only a single descriptor. */
static int handle_ir_buffer_fill(struct context *context,
                                 struct descriptor *d,
                                 struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);

        if (!last->transfer_status)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
                /* req_count - res_count is the number of bytes filled in */
                ctx->base.callback.mc(&ctx->base,
                                      le32_to_cpu(last->data_address) +
                                      le16_to_cpu(last->req_count) -
                                      le16_to_cpu(last->res_count),
                                      ctx->base.callback_data);

        return 1;
}

static int handle_it_packet(struct context *context,
                            struct descriptor *d,
                            struct descriptor *last)
{
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
        int i;
        struct descriptor *pd;

        for (pd = d; pd <= last; pd++)
                if (pd->transfer_status)
                        break;
        if (pd > last)
                /* Descriptor(s) not done yet, stop iteration */
                return 0;

        i = ctx->header_length;
        if (i + 4 < PAGE_SIZE) {
                /* Present this value as big-endian to match the receive code */
                *(__be32 *)(ctx->header + i) = cpu_to_be32(
                                ((u32)le16_to_cpu(pd->transfer_status) << 16) |
                                le16_to_cpu(pd->res_count));
                ctx->header_length += 4;
        }

        if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
                                      ctx->header_length, ctx->header,
                                      ctx->base.callback_data);
                ctx->header_length = 0;
        }

        return 1;
}

static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
        u32 hi = channels >> 32, lo = channels;

        reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
        reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
        reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
        reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
        ohci->mc_channels = channels;
}
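
/*
 * Note on the sequence above: the Clear registers are written first so
 * that channels being dropped stop matching before any new ones are
 * enabled, and a channel that remains in the mask is never cleared,
 * since only the complement of the new mask goes to the Clear side.
 */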

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
                                int type, int channel, size_t header_size)
{
        struct fw_ohci *ohci = fw_ohci(card);
        struct iso_context *uninitialized_var(ctx);
        descriptor_callback_t uninitialized_var(callback);
        u64 *uninitialized_var(channels);
        u32 *uninitialized_var(mask), uninitialized_var(regs);
        unsigned long flags;
        int index, ret = -EBUSY;

        spin_lock_irqsave(&ohci->lock, flags);

        switch (type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                mask     = &ohci->it_context_mask;
                callback = handle_it_packet;
                index    = ffs(*mask) - 1;
                if (index >= 0) {
                        *mask &= ~(1 << index);
                        regs = OHCI1394_IsoXmitContextBase(index);
                        ctx  = &ohci->it_context_list[index];
                }
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                channels = &ohci->ir_context_channels;
                mask     = &ohci->ir_context_mask;
                callback = handle_ir_packet_per_buffer;
                index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
                if (index >= 0) {
                        *channels &= ~(1ULL << channel);
                        *mask     &= ~(1 << index);
                        regs = OHCI1394_IsoRcvContextBase(index);
                        ctx  = &ohci->ir_context_list[index];
                }
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                mask     = &ohci->ir_context_mask;
                callback = handle_ir_buffer_fill;
                index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
                if (index >= 0) {
                        ohci->mc_allocated = true;
                        *mask &= ~(1 << index);
                        regs = OHCI1394_IsoRcvContextBase(index);
                        ctx  = &ohci->ir_context_list[index];
                }
                break;

        default:
                index = -1;
                ret = -ENOSYS;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);

        if (index < 0)
                return ERR_PTR(ret);

        memset(ctx, 0, sizeof(*ctx));
        ctx->header_length = 0;
        ctx->header = (void *) __get_free_page(GFP_KERNEL);
        if (ctx->header == NULL) {
                ret = -ENOMEM;
                goto out;
        }
        ret = context_init(&ctx->context, ohci, regs, callback);
        if (ret < 0)
                goto out_with_header;

        if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
                set_multichannel_mask(ohci, 0);

        return &ctx->base;

 out_with_header:
        free_page((unsigned long)ctx->header);
 out:
        spin_lock_irqsave(&ohci->lock, flags);

        switch (type) {
        case FW_ISO_CONTEXT_RECEIVE:
                *channels |= 1ULL << channel;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                ohci->mc_allocated = false;
                break;
        }
        *mask |= 1 << index;

        spin_unlock_irqrestore(&ohci->lock, flags);

        return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
                          s32 cycle, u32 sync, u32 tags)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        struct fw_ohci *ohci = ctx->context.ohci;
        u32 control = IR_CONTEXT_ISOCH_HEADER, match;
        int index;

        /* the controller cannot start without any queued packets */
        if (ctx->context.last->branch_address == 0)
                return -ENODATA;

        switch (ctx->base.type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                index = ctx - ohci->it_context_list;
                match = 0;
                if (cycle >= 0)
                        match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
                                (cycle & 0x7fff) << 16;

                reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
                context_run(&ctx->context, match);
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
                /* fall through */
        case FW_ISO_CONTEXT_RECEIVE:
                index = ctx - ohci->ir_context_list;
                match = (tags << 28) | (sync << 8) | ctx->base.channel;
                if (cycle >= 0) {
                        match |= (cycle & 0x07fff) << 12;
                        control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
                }

                reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
                reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
                reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
                context_run(&ctx->context, control);
                break;
        }

        ctx->sync = sync;
        ctx->tags = tags;

        return 0;
}
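
/*
 * ContextMatch encoding used by ohci_start_iso() above: the tag mask in
 * bits 31-28, the 15-bit cycle match value in bits 26-12 (honored only
 * with IR_CONTEXT_CYCLE_MATCH_ENABLE set), the sync field in bits 11-8,
 * and the channel number in bits 5-0.  E.g. tags = 0x3, sync = 1,
 * channel = 5 with no cycle match yields 0x30000105.
 */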

static int ohci_stop_iso(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        int index;

        switch (ctx->base.type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                index = ctx - ohci->it_context_list;
                reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
                break;

        case FW_ISO_CONTEXT_RECEIVE:
        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                index = ctx - ohci->ir_context_list;
                reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
                break;
        }
        flush_writes(ohci);
        context_stop(&ctx->context);
        tasklet_kill(&ctx->context.tasklet);

        return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int index;

        ohci_stop_iso(base);
        context_release(&ctx->context);
        free_page((unsigned long)ctx->header);

        spin_lock_irqsave(&ohci->lock, flags);

        switch (base->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                index = ctx - ohci->it_context_list;
                ohci->it_context_mask |= 1 << index;
                break;

        case FW_ISO_CONTEXT_RECEIVE:
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
                ohci->ir_context_channels |= 1ULL << base->channel;
                break;

        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                index = ctx - ohci->ir_context_list;
                ohci->ir_context_mask |= 1 << index;
                ohci->ir_context_channels |= ohci->mc_channels;
                ohci->mc_channels = 0;
                ohci->mc_allocated = false;
                break;
        }

        spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
        struct fw_ohci *ohci = fw_ohci(base->card);
        unsigned long flags;
        int ret;

        switch (base->type) {
        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:

                spin_lock_irqsave(&ohci->lock, flags);

                /* Don't allow multichannel to grab other contexts' channels. */
                if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
                        *channels = ohci->ir_context_channels;
                        ret = -EBUSY;
                } else {
                        set_multichannel_mask(ohci, *channels);
                        ret = 0;
                }

                spin_unlock_irqrestore(&ohci->lock, flags);

                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
        int i;
        struct iso_context *ctx;

        for (i = 0 ; i < ohci->n_ir ; i++) {
                ctx = &ohci->ir_context_list[i];
                if (ctx->context.running)
                        ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
        }

        for (i = 0 ; i < ohci->n_it ; i++) {
                ctx = &ohci->it_context_list[i];
                if (ctx->context.running)
                        ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
        }
}
#endif

static int queue_iso_transmit(struct iso_context *ctx,
                              struct fw_iso_packet *packet,
                              struct fw_iso_buffer *buffer,
                              unsigned long payload)
{
        struct descriptor *d, *last, *pd;
        struct fw_iso_packet *p;
        __le32 *header;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, payload_z, irq;
        u32 payload_index, payload_end_index, next_page_index;
        int page, end_page, i, length, offset;

        p = packet;
        payload_index = payload;

        if (p->skip)
                z = 1;
        else
                z = 2;
        if (p->header_length > 0)
                z++;

        /* Determine the first page the payload isn't contained in. */
        end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
        if (p->payload_length > 0)
                payload_z = end_page - (payload_index >> PAGE_SHIFT);
        else
                payload_z = 0;

        z += payload_z;

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

        d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
        if (d == NULL)
                return -ENOMEM;

        if (!p->skip) {
                d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
                d[0].req_count = cpu_to_le16(8);
                /*
                 * Link the skip address to this descriptor itself.  This causes
                 * a context to skip a cycle whenever lost cycles or FIFO
                 * overruns occur, without dropping the data.  The application
                 * should then decide whether this is an error condition or not.
                 * FIXME: Make the context's cycle-lost behaviour configurable?
                 */
                d[0].branch_address = cpu_to_le32(d_bus | z);

                header = (__le32 *) &d[1];
                header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
                                        IT_HEADER_TAG(p->tag) |
                                        IT_HEADER_TCODE(TCODE_STREAM_DATA) |
                                        IT_HEADER_CHANNEL(ctx->base.channel) |
                                        IT_HEADER_SPEED(ctx->base.speed));
                header[1] =
                        cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
                                                          p->payload_length));
        }

        if (p->header_length > 0) {
                d[2].req_count    = cpu_to_le16(p->header_length);
                d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
                memcpy(&d[z], p->header, p->header_length);
        }

        pd = d + z - payload_z;
        payload_end_index = payload_index + p->payload_length;
        for (i = 0; i < payload_z; i++) {
                page            = payload_index >> PAGE_SHIFT;
                offset          = payload_index & ~PAGE_MASK;
                next_page_index = (page + 1) << PAGE_SHIFT;
                length          =
                        min(next_page_index, payload_end_index) - payload_index;
                pd[i].req_count = cpu_to_le16(length);

                page_bus = page_private(buffer->pages[page]);
                pd[i].data_address = cpu_to_le32(page_bus + offset);

                payload_index += length;
        }

        if (p->interrupt)
                irq = DESCRIPTOR_IRQ_ALWAYS;
        else
                irq = DESCRIPTOR_NO_IRQ;

        last = z == 2 ? d : d + z - 1;
        last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
                                     DESCRIPTOR_STATUS |
                                     DESCRIPTOR_BRANCH_ALWAYS |
                                     irq);

        context_append(&ctx->context, d, z, header_z);

        return 0;
}
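
/*
 * Example of the program built above: for a non-skip packet with no
 * extra header quadlets and a payload crossing one page boundary,
 * z = 4 - d[0] is the OUTPUT_MORE-immediate descriptor whose eight
 * header bytes occupy the d[1] slot, d[2] and d[3] map the two payload
 * fragments, and d[3], being "last", gets the OUTPUT_LAST | STATUS |
 * BRANCH_ALWAYS flags.
 */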

static int queue_iso_packet_per_buffer(struct iso_context *ctx,
                                       struct fw_iso_packet *packet,
                                       struct fw_iso_buffer *buffer,
                                       unsigned long payload)
{
        struct descriptor *d, *pd;
        dma_addr_t d_bus, page_bus;
        u32 z, header_z, rest;
        int i, j, length;
        int page, offset, packet_count, header_size, payload_per_buffer;

        /*
         * The OHCI controller puts the isochronous header and trailer in the
         * buffer, so we need at least 8 bytes.
         */
        packet_count = packet->header_length / ctx->base.header_size;
        header_size  = max(ctx->base.header_size, (size_t)8);

        /* Get header size in number of descriptors. */
        header_z = DIV_ROUND_UP(header_size, sizeof(*d));
        page     = payload >> PAGE_SHIFT;
        offset   = payload & ~PAGE_MASK;
        payload_per_buffer = packet->payload_length / packet_count;

        for (i = 0; i < packet_count; i++) {
                /* d points to the header descriptor */
                z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
                d = context_get_descriptors(&ctx->context,
                                z + header_z, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
                                              DESCRIPTOR_INPUT_MORE);
                if (packet->skip && i == 0)
                        d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
                d->req_count    = cpu_to_le16(header_size);
                d->res_count    = d->req_count;
                d->transfer_status = 0;
                d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

                rest = payload_per_buffer;
                pd = d;
                for (j = 1; j < z; j++) {
                        pd++;
                        pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                                  DESCRIPTOR_INPUT_MORE);

                        if (offset + rest < PAGE_SIZE)
                                length = rest;
                        else
                                length = PAGE_SIZE - offset;
                        pd->req_count = cpu_to_le16(length);
                        pd->res_count = pd->req_count;
                        pd->transfer_status = 0;

                        page_bus = page_private(buffer->pages[page]);
                        pd->data_address = cpu_to_le32(page_bus + offset);

                        offset = (offset + length) & ~PAGE_MASK;
                        rest -= length;
                        if (offset == 0)
                                page++;
                }
                pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
                                          DESCRIPTOR_INPUT_LAST |
                                          DESCRIPTOR_BRANCH_ALWAYS);
                if (packet->interrupt && i == packet_count - 1)
                        pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                context_append(&ctx->context, d, z, header_z);
        }

        return 0;
}
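
/*
 * Each loop iteration above queues one descriptor block per packet: an
 * INPUT_MORE descriptor that captures header_size bytes of stripped
 * packet header into the slots just past the block, followed by
 * INPUT_MORE descriptors for the payload pages, the final one rewritten
 * to INPUT_LAST | BRANCH_ALWAYS so that the context branches on to the
 * next packet's block.
 */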

static int queue_iso_buffer_fill(struct iso_context *ctx,
                                 struct fw_iso_packet *packet,
                                 struct fw_iso_buffer *buffer,
                                 unsigned long payload)
{
        struct descriptor *d;
        dma_addr_t d_bus, page_bus;
        int page, offset, rest, z, i, length;

        page   = payload >> PAGE_SHIFT;
        offset = payload & ~PAGE_MASK;
        rest   = packet->payload_length;

        /* We need one descriptor for each page in the buffer. */
        z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

        if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
                return -EFAULT;

        for (i = 0; i < z; i++) {
                d = context_get_descriptors(&ctx->context, 1, &d_bus);
                if (d == NULL)
                        return -ENOMEM;

                d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
                                         DESCRIPTOR_BRANCH_ALWAYS);
                if (packet->skip && i == 0)
                        d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
                if (packet->interrupt && i == z - 1)
                        d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

                if (offset + rest < PAGE_SIZE)
                        length = rest;
                else
                        length = PAGE_SIZE - offset;
                d->req_count = cpu_to_le16(length);
                d->res_count = d->req_count;
                d->transfer_status = 0;

                page_bus = page_private(buffer->pages[page]);
                d->data_address = cpu_to_le32(page_bus + offset);

                rest -= length;
                offset = 0;
                page++;

                context_append(&ctx->context, d, 1, 0);
        }

        return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
                          struct fw_iso_packet *packet,
                          struct fw_iso_buffer *buffer,
                          unsigned long payload)
{
        struct iso_context *ctx = container_of(base, struct iso_context, base);
        unsigned long flags;
        int ret = -ENOSYS;

        spin_lock_irqsave(&ctx->context.ohci->lock, flags);
        switch (base->type) {
        case FW_ISO_CONTEXT_TRANSMIT:
                ret = queue_iso_transmit(ctx, packet, buffer, payload);
                break;
        case FW_ISO_CONTEXT_RECEIVE:
                ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
                break;
        case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
                ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
                break;
        }
        spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

        return ret;
}

static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
        struct context *ctx =
                &container_of(base, struct iso_context, base)->context;

        reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
        flush_writes(ctx->ohci);
}
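
/*
 * Writing CONTEXT_WAKE makes a running context re-fetch the branch
 * address of its current descriptor, so it picks up any blocks appended
 * since it last went idle; flush_writes() posts the wake to the
 * controller before the caller continues.
 */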

static const struct fw_card_driver ohci_driver = {
        .enable                 = ohci_enable,
        .read_phy_reg           = ohci_read_phy_reg,
        .update_phy_reg         = ohci_update_phy_reg,
        .set_config_rom         = ohci_set_config_rom,
        .send_request           = ohci_send_request,
        .send_response          = ohci_send_response,
        .cancel_packet          = ohci_cancel_packet,
        .enable_phys_dma        = ohci_enable_phys_dma,
        .read_csr               = ohci_read_csr,
        .write_csr              = ohci_write_csr,

        .allocate_iso_context   = ohci_allocate_iso_context,
        .free_iso_context       = ohci_free_iso_context,
        .set_iso_channels       = ohci_set_iso_channels,
        .queue_iso              = ohci_queue_iso,
        .flush_queue_iso        = ohci_flush_queue_iso,
        .start_iso              = ohci_start_iso,
        .stop_iso               = ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
        if (machine_is(powermac)) {
                struct device_node *ofn = pci_device_to_OF_node(dev);

                if (ofn) {
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
                        pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
                }
        }
}

static void pmac_ohci_off(struct pci_dev *dev)
{
        if (machine_is(powermac)) {
                struct device_node *ofn = pci_device_to_OF_node(dev);

                if (ofn) {
                        pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
                        pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
                }
        }
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif /* CONFIG_PPC_PMAC */

static int __devinit pci_probe(struct pci_dev *dev,
                               const struct pci_device_id *ent)
{
        struct fw_ohci *ohci;
        u32 bus_options, max_receive, link_speed, version;
        u64 guid;
        int i, err;
        size_t size;

        if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
                dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
                return -ENOSYS;
        }

        ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
        if (ohci == NULL) {
                err = -ENOMEM;
                goto fail;
        }

        fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

        pmac_ohci_on(dev);

        err = pci_enable_device(dev);
        if (err) {
                fw_error("Failed to enable OHCI hardware\n");
                goto fail_free;
        }

        pci_set_master(dev);
        pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
        pci_set_drvdata(dev, ohci);

        spin_lock_init(&ohci->lock);
        mutex_init(&ohci->phy_reg_mutex);

        tasklet_init(&ohci->bus_reset_tasklet,
                     bus_reset_tasklet, (unsigned long)ohci);

        err = pci_request_region(dev, 0, ohci_driver_name);
        if (err) {
                fw_error("MMIO resource unavailable\n");
                goto fail_disable;
        }

        ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
        if (ohci->registers == NULL) {
                fw_error("Failed to remap registers\n");
                err = -ENXIO;
                goto fail_iomem;
        }

        for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
                if ((ohci_quirks[i].vendor == dev->vendor) &&
                    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
                     ohci_quirks[i].device == dev->device) &&
                    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
                     ohci_quirks[i].revision >= dev->revision)) {
                        ohci->quirks = ohci_quirks[i].flags;
                        break;
                }
        if (param_quirks)
                ohci->quirks = param_quirks;

        /*
         * Because dma_alloc_coherent() allocates at least one page,
         * we save space by using a common buffer for the AR request/
         * response descriptors and the self IDs buffer.
         */
        BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
        BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
        ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
                                               PAGE_SIZE,
                                               &ohci->misc_buffer_bus,
                                               GFP_KERNEL);
        if (!ohci->misc_buffer) {
                err = -ENOMEM;
                goto fail_iounmap;
        }
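
        /*
         * Resulting layout of the shared page (cf. the BUILD_BUG_ONs
         * above): bytes [0, PAGE_SIZE/4) hold the AR request context's
         * descriptors, [PAGE_SIZE/4, PAGE_SIZE/2) the AR response
         * context's, and [PAGE_SIZE/2, PAGE_SIZE) the self ID buffer,
         * matching the offsets passed to ar_context_init() and the
         * self_id_cpu/self_id_bus assignments below.
         */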

        err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
                              OHCI1394_AsReqRcvContextControlSet);
        if (err < 0)
                goto fail_misc_buf;

        err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
                              OHCI1394_AsRspRcvContextControlSet);
        if (err < 0)
                goto fail_arreq_ctx;

        err = context_init(&ohci->at_request_ctx, ohci,
                           OHCI1394_AsReqTrContextControlSet, handle_at_packet);
        if (err < 0)
                goto fail_arrsp_ctx;

        err = context_init(&ohci->at_response_ctx, ohci,
                           OHCI1394_AsRspTrContextControlSet, handle_at_packet);
        if (err < 0)
                goto fail_atreq_ctx;

        reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
        ohci->ir_context_channels = ~0ULL;
        ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
        reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
        ohci->ir_context_mask = ohci->ir_context_support;
        ohci->n_ir = hweight32(ohci->ir_context_mask);
        size = sizeof(struct iso_context) * ohci->n_ir;
        ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

        reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
        ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
        reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
        ohci->it_context_mask = ohci->it_context_support;
        ohci->n_it = hweight32(ohci->it_context_mask);
        size = sizeof(struct iso_context) * ohci->n_it;
        ohci->it_context_list = kzalloc(size, GFP_KERNEL);

        if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
                err = -ENOMEM;
                goto fail_contexts;
        }

        ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
        ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

        bus_options = reg_read(ohci, OHCI1394_BusOptions);
        max_receive = (bus_options >> 12) & 0xf;
        link_speed = bus_options & 0x7;
        guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
                reg_read(ohci, OHCI1394_GUIDLo);

        err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
        if (err)
                goto fail_contexts;

        version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
        fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
                  "%d IR + %d IT contexts, quirks 0x%x\n",
                  dev_name(&dev->dev), version >> 16, version & 0xff,
                  ohci->n_ir, ohci->n_it, ohci->quirks);

        return 0;

 fail_contexts:
        kfree(ohci->ir_context_list);
        kfree(ohci->it_context_list);
        context_release(&ohci->at_response_ctx);
 fail_atreq_ctx:
        context_release(&ohci->at_request_ctx);
 fail_arrsp_ctx:
        ar_context_release(&ohci->ar_response_ctx);
 fail_arreq_ctx:
        ar_context_release(&ohci->ar_request_ctx);
 fail_misc_buf:
        dma_free_coherent(ohci->card.device, PAGE_SIZE,
                          ohci->misc_buffer, ohci->misc_buffer_bus);
 fail_iounmap:
        pci_iounmap(dev, ohci->registers);
 fail_iomem:
        pci_release_region(dev, 0);
 fail_disable:
        pci_disable_device(dev);
 fail_free:
        kfree(ohci);
        pmac_ohci_off(dev);
 fail:
        if (err == -ENOMEM)
                fw_error("Out of memory\n");

        return err;
}

static void pci_remove(struct pci_dev *dev)
{
        struct fw_ohci *ohci;

        ohci = pci_get_drvdata(dev);
        reg_write(ohci, OHCI1394_IntMaskClear, ~0);
        flush_writes(ohci);
        fw_core_remove_card(&ohci->card);

        /*
         * FIXME: Fail all pending packets here, now that the upper
         * layers can't queue any more.
         */

        software_reset(ohci);
        free_irq(dev->irq, ohci);

        if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->next_config_rom, ohci->next_config_rom_bus);
        if (ohci->config_rom)
                dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
                                  ohci->config_rom, ohci->config_rom_bus);
        ar_context_release(&ohci->ar_request_ctx);
        ar_context_release(&ohci->ar_response_ctx);
        dma_free_coherent(ohci->card.device, PAGE_SIZE,
                          ohci->misc_buffer, ohci->misc_buffer_bus);
        context_release(&ohci->at_request_ctx);
        context_release(&ohci->at_response_ctx);
        kfree(ohci->it_context_list);
        kfree(ohci->ir_context_list);
        pci_disable_msi(dev);
        pci_iounmap(dev, ohci->registers);
        pci_release_region(dev, 0);
        pci_disable_device(dev);
        pmac_ohci_off(dev);
        kfree(ohci);

        fw_notify("Removed fw-ohci device.\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
        struct fw_ohci *ohci = pci_get_drvdata(dev);
        int err;

        software_reset(ohci);
        free_irq(dev->irq, ohci);
        pci_disable_msi(dev);
        err = pci_save_state(dev);
        if (err) {
                fw_error("pci_save_state failed\n");
                return err;
        }
        err = pci_set_power_state(dev, pci_choose_state(dev, state));
        if (err)
                fw_error("pci_set_power_state failed with %d\n", err);
        pmac_ohci_off(dev);

        return 0;
}

static int pci_resume(struct pci_dev *dev)
{
        struct fw_ohci *ohci = pci_get_drvdata(dev);
        int err;

        pmac_ohci_on(dev);
        pci_set_power_state(dev, PCI_D0);
        pci_restore_state(dev);
        err = pci_enable_device(dev);
        if (err) {
                fw_error("pci_enable_device failed\n");
                return err;
        }

        /* Some systems don't set up the GUID register on resume from RAM */
        if (!reg_read(ohci, OHCI1394_GUIDLo) &&
            !reg_read(ohci, OHCI1394_GUIDHi)) {
                reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
                reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
        }

        err = ohci_enable(&ohci->card, NULL, 0);
        if (err)
                return err;

        ohci_resume_iso_dma(ohci);

        return 0;
}
#endif

static const struct pci_device_id pci_table[] = {
        { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
        { }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
        .name           = ohci_driver_name,
        .id_table       = pci_table,
        .probe          = pci_probe,
        .remove         = pci_remove,
#ifdef CONFIG_PM
        .resume         = pci_resume,
        .suspend        = pci_suspend,
#endif
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
MODULE_ALIAS("ohci1394");
#endif

static int __init fw_ohci_init(void)
{
        return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
        pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);