// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "packet-header-definitions.h"
#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)
#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)
#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
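/*
 * Worked example (illustrative, assuming 4 KiB pages): AR_BUFFERS_MIN =
 * DIV_ROUND_UP(32768, 4096) = 8, so AR_BUFFERS = 8.  MAX_AR_PACKET_SIZE =
 * 16 + 4096 + 4 = 4116 bytes, so a single packet can span at most
 * AR_WRAPAROUND_PAGES = DIV_ROUND_UP(4116, 4096) = 2 pages of the ring.
 */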
	struct page *pages[AR_BUFFERS];
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	unsigned int last_buffer_index;
	struct tasklet_struct tasklet;
typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);
/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[];
};
struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program.  It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};
#define IT_HEADER_SY(v)		((v) << 0)
#define IT_HEADER_TCODE(v)	((v) << 4)
#define IT_HEADER_CHANNEL(v)	((v) << 8)
#define IT_HEADER_TAG(v)	((v) << 14)
#define IT_HEADER_SPEED(v)	((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
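/*
 * Sketch (illustrative, not from the original excerpt): the first quadlet
 * of an IT packet header for channel 13, tag 1, at S400 could be composed
 * as
 *
 *	IT_HEADER_SPEED(2) | IT_HEADER_TAG(1) |
 *	IT_HEADER_CHANNEL(13) | IT_HEADER_SY(0)
 *
 * while IT_HEADER_DATA_LENGTH() fills the second quadlet; SPEED and
 * DATA_LENGTH can share shift 16 because they live in different quadlets.
 */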
	struct fw_iso_context base;
	struct context context;
	size_t header_length;
	unsigned long flushing_completions;
#define CONFIG_ROM_SIZE		1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned int quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;

	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;	/* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels;	/* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;	/* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels;	/* channels in use by the multichannel IR context */

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static struct workqueue_struct *selfid_workqueue;
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010
static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_REV_ID_VIA_VT6306		0x46
#define PCI_DEVICE_ID_VIA_VT6315	0x3403
#define QUIRK_CYCLE_TIMER	0x1
#define QUIRK_RESET_PACKET	0x2
#define QUIRK_BE_HEADERS	0x4
#define QUIRK_NO_1394A		0x8
#define QUIRK_NO_MSI		0x10
#define QUIRK_TI_SLLZ059	0x20
#define QUIRK_IR_WAKE		0x40
// On a PCI Express Root Complex in any type of AMD Ryzen machine, a VIA VT6306/6307/6308 behind
// an ASMedia ASM1083/1085 bridge has the inconvenience that read accesses to the 'Isochronous
// Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected system
// reboot.  The exact mechanism is unclear; reads of the other registers, e.g. the 'Node ID'
// register, are safe enough, so it is probably due to the detection of some type of PCIe error.
#define QUIRK_REBOOT_BY_CYCLE_TIMER_READ	0x80000000
#if IS_ENABLED(CONFIG_X86)

static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
{
	return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
}

#define PCI_DEVICE_ID_ASMEDIA_ASM108X	0x1080

static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
{
	const struct pci_dev *pcie_to_pci_bridge;

	// Detect any type of AMD Ryzen machine.
	if (!static_cpu_has(X86_FEATURE_ZEN))
		return false;

	// Detect VIA VT6306/6307/6308.
	if (pdev->vendor != PCI_VENDOR_ID_VIA)
		return false;
	if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
		return false;

	// Detect Asmedia ASM1083/1085.
	pcie_to_pci_bridge = pdev->bus->self;
	if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
		return false;
	if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
		return false;

	return true;
}
#else // CONFIG_X86

#define has_reboot_by_cycle_timer_read_quirk(ohci)		false
#define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev)	false

#endif // CONFIG_X86
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
		QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};
/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	")");
#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", or a combination, or all = -1)");

static bool param_remote_dma;
module_param_named(remote_dma, param_remote_dma, bool, 0444);
MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
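/*
 * Usage sketch (illustrative, not part of the original source): the module
 * parameters above combine at load time, e.g.
 *
 *	modprobe firewire-ohci debug=5 quirks=0x10 remote_dma=1
 *
 * enables AT/AR and IRQ logging (1 | 4), forces QUIRK_NO_MSI, and allows
 * unfiltered remote DMA.
 */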
static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_IRQS)))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}
static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}
static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
	u32 *s;

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
		    self_id_count, generation, ohci->node_id);

	for (s = ohci->self_id_buffer; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			ohci_notice(ohci,
			    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			ohci_notice(ohci,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}
static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};
static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
{
	int tcode = async_header_get_tcode(header);
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
			    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_CYCLE_START:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		snprintf(specific, sizeof(specific), " %x,%x",
			 async_header_get_data_length(header),
			 async_header_get_extended_tcode(header));
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case TCODE_STREAM_DATA:
		ohci_notice(ohci, "A%c %s, %s\n",
			    dir, evts[evt], tcodes[tcode]);
		break;
	case OHCI_TCODE_PHY_PACKET:
		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
			    dir, evts[evt], header[1], header[2]);
		break;
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %012llx%s\n",
		    dir, speed, async_header_get_tlabel(header),
		    async_header_get_source(header), async_header_get_destination(header),
		    evts[evt], tcodes[tcode], async_header_get_offset(header), specific);
		break;
	default:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
		    dir, speed, async_header_get_tlabel(header),
		    async_header_get_source(header), async_header_get_destination(header),
		    evts[evt], tcodes[tcode], specific);
	}
}
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);

	return -EBUSY;
}
static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);

	return -EBUSY;
}
static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);

	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}
static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}
static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}
static void ar_context_release(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	unsigned int i;

	if (ctx->buffer)
		vunmap(ctx->buffer);

	for (i = 0; i < AR_BUFFERS; i++) {
		if (ctx->pages[i])
			dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
				       ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
	}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}
static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = READ_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might never be updated by the
			 * controller and still look empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}
			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}
static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = async_header_get_tcode(p.header);
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = async_header_get_data_length(p.header);
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status && tcode == OHCI1394_phy_tcode)
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}
static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return ERR_PTR(-EAGAIN);
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (IS_ERR(p))
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (IS_ERR(p))
		goto error;
	if (p != end) {
		ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}
static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	struct device *dev = ohci->card.device;
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
						DMA_FROM_DEVICE, GFP_KERNEL);
		if (!ctx->pages[i])
			goto out_of_memory;
		set_page_private(ctx->pages[i], dma_addr);
		dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i]              = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}
static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}
static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			  struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
		    address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					  struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;

			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}
/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t bus_addr;
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	/*
	 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
	 * for descriptors, even 0x10-byte ones.  This can cause page faults when
	 * an IOMMU is in use and the oversized read crosses a page boundary.
	 * Work around this by always leaving at least 0x10 bytes of padding.
	 */
	desc->buffer_size = PAGE_SIZE - offset - 0x10;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}
static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
				      struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}
static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
		dmam_free_coherent(card->device, PAGE_SIZE, desc,
				   desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
	}
}
/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/* No room for the descriptor in this buffer, so advance to the
		 * next one. */

		if (desc->list.next == &ctx->buffer_list) {
			/* If there is no free buffer next in the list,
			 * allocate one. */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				  struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}
static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * We do not do this for transmit contexts, since it is not clear how
	 * it interacts with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}
static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}
struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, payload_bus;
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.
	 */
	tcode = async_header_get_tcode(packet->header);
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		if (tcode_is_block_packet(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case TCODE_LINK_INTERNAL:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus    = payload_bus;
			packet->payload_mapped = true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}
static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}
static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error as
		 * one that uses a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		fallthrough;

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}
static u32 get_cycle_time(struct fw_ohci *ohci);

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = async_header_get_tcode(packet->header);
	if (tcode_is_block_packet(tcode))
		length = async_header_get_data_length(packet->header);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!tcode_is_read_request(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = async_header_get_tcode(packet->header);
	length = async_header_get_data_length(packet->header);
	payload = packet->payload;
	ext_tcode = async_header_get_extended_tcode(packet->header);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	// Timestamping on behalf of the hardware.
	response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
	fw_core_handle_response(&ohci->card, &response);
}
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset = async_header_get_offset(packet->header);
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (async_header_get_destination(packet->header) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);

		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0) {
		// Timestamping on behalf of the hardware.
		packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));

		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}
static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}
static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks  = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
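/*
 * Worked example (illustrative): a register value of 0x02002064 decodes to
 * cycleSeconds = 1, cycleCount = 2, cycleOffset = 100, i.e.
 * 100 + 3072 * 2 + 24576000 * 1 = 24582244 ticks of the 24.576 MHz clock.
 */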
/*
 * Some controllers exhibit one or more of the following bugs when updating the
 * iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that happens
 *    at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field is
 *    not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same, i.e.
 * less than twice the other.  Furthermore, any negative difference indicates an
 * error.  (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
 * execute, so we have enough precision to compute the ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	if (has_reboot_by_cycle_timer_read_quirk(ohci))
		return 0;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}
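/*
 * Example of a rejected sample (illustrative): reads giving t0 = 100,
 * t1 = 200, t2 = 4000 produce diff01 = 100 and diff12 = 3800; since
 * diff12 / diff01 >= 2, the middle read is assumed corrupted and the
 * three reads are retried.
 */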
/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
				 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}
static int get_status_for_port(struct fw_ohci *ohci, int port_index)
{
	int reg;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, port_index);
	if (reg >= 0)
		reg = read_phy_reg(ohci, 8);
	mutex_unlock(&ohci->phy_reg_mutex);
	if (reg < 0)
		return reg;

	switch (reg & 0x0f) {
	case 0x06:
		return 2;	/* is child node (connected to parent node) */
	case 0x0e:
		return 3;	/* is parent node (connected to child node) */
	}
	return 1;		/* not connected */
}
static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
			   int self_id_count)
{
	int i;
	u32 entry;

	for (i = 0; i < self_id_count; i++) {
		entry = ohci->self_id_buffer[i];
		if ((self_id & 0xff000000) == (entry & 0xff000000))
			return -1;
		if ((self_id & 0xff000000) < (entry & 0xff000000))
			return i;
	}
	return i;
}
static int initiated_reset(struct fw_ohci *ohci)
{
	int reg;
	int ret = 0;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
	if (reg >= 0) {
		reg = read_phy_reg(ohci, 8);
		reg |= 0x10;
		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
		if (reg >= 0) {
			reg = read_phy_reg(ohci, 12); /* read register 12 */
			if (reg >= 0) {
				if ((reg & 0x08) == 0x08) {
					/* bit 3 indicates "initiated reset" */
					ret = 0x2;
				}
			}
		}
	}
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;
}
/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, status;
	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
	u32 self_id = 0x8040c800;
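
	/*
	 * Decoding the constant (illustrative): 0x8040c800 sets the selfID
	 * packet identifier (bits 31-30 = 0b10), the link-active bit (22),
	 * speed 0b11 in bits 15-14, and the contender bit (11); phy ID, gap
	 * count, power class, and port status are filled in below.
	 */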
	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	self_id |= ((reg & 0x3f) << 24); /* phy ID */

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x07) << 8); /* power class */

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x3f) << 16); /* gap count */

	for (i = 0; i < 3; i++) {
		status = get_status_for_port(ohci, i);
		if (status < 0)
			return status;
		self_id |= ((status & 0x3) << (6 - (i * 2)));
	}

	self_id |= initiated_reset(ohci);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}
static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "malconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id  = cond_le32_to_cpu(ohci->self_id[i]);
		u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count.  In this case, the self IDs
			 * so far are valid and should be processed so that the
			 * bus manager can then correct the gap count.
			 */
			if (id == 0xffff008f) {
				ohci_notice(ohci, "ignoring spurious self IDs\n");
				self_id_count = j;
				break;
			}

			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
				    j, self_id_count, id, id2);
			return;
		}
		ohci->self_id_buffer[j] = id;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			ohci_notice(ohci,
				    "could not construct local self ID\n");
			return;
		}
	}

	if (self_id_count == 0) {
		ohci_notice(ohci, "no self IDs\n");
		return;
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs.
	 */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irq(&ohci->lock);

	ohci->generation = -1; /* prevent AT packet queueing */
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);

	spin_unlock_irq(&ohci->lock);

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	spin_lock_irq(&ohci->lock);

	ohci->generation = generation;
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
	reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

	if (ohci->quirks & QUIRK_RESET_PACKET)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom     = ohci->next_config_rom;
		ohci->config_rom_bus = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

	if (param_remote_dma) {
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
	}

	spin_unlock_irq(&ohci->lock);

	if (free_rom)
		dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);

	log_selfids(ohci, generation, self_id_count);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}
2116 static irqreturn_t irq_handler(int irq, void *data)
2118 struct fw_ohci *ohci = data;
2119 u32 event, iso_event;
2122 event = reg_read(ohci, OHCI1394_IntEventClear);
2124 if (!event || !~event)
2128 * busReset and postedWriteErr events must not be cleared yet
2129 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2131 reg_write(ohci, OHCI1394_IntEventClear,
2132 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2133 log_irqs(ohci, event);
2134 // The flag is masked again at bus_reset_work() scheduled by selfID event.
2135 if (event & OHCI1394_busReset)
2136 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2138 if (event & OHCI1394_selfIDComplete)
2139 queue_work(selfid_workqueue, &ohci->bus_reset_work);
2141 if (event & OHCI1394_RQPkt)
2142 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2144 if (event & OHCI1394_RSPkt)
2145 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2147 if (event & OHCI1394_reqTxComplete)
2148 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2150 if (event & OHCI1394_respTxComplete)
2151 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2153 if (event & OHCI1394_isochRx) {
2154 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2155 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
2158 i = ffs(iso_event) - 1;
2160 &ohci->ir_context_list[i].context.tasklet);
2161 iso_event &= ~(1 << i);
2165 if (event & OHCI1394_isochTx) {
2166 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2167 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
2170 i = ffs(iso_event) - 1;
2172 &ohci->it_context_list[i].context.tasklet);
2173 iso_event &= ~(1 << i);
2177 if (unlikely(event & OHCI1394_regAccessFail))
2178 ohci_err(ohci, "register access failure\n");
2180 if (unlikely(event & OHCI1394_postedWriteErr)) {
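/*
 * Reading the PostedWriteAddress registers releases the error
 * condition; only afterwards may the event bit be cleared (a hedged
 * reading of OHCI 1.1 clause 13.2.8.1, cited above).
 */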
2181 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2182 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2183 reg_write(ohci, OHCI1394_IntEventClear,
2184 OHCI1394_postedWriteErr);
2185 if (printk_ratelimit())
2186 ohci_err(ohci, "PCI posted write error\n");
2189 if (unlikely(event & OHCI1394_cycleTooLong)) {
2190 if (printk_ratelimit())
2191 ohci_notice(ohci, "isochronous cycle too long\n");
2192 reg_write(ohci, OHCI1394_LinkControlSet,
2193 OHCI1394_LinkControl_cycleMaster);
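/*
 * The controller stops sending cycle start packets and clears the
 * cycleMaster bit when this fault occurs; the write above re-arms
 * cycle master duty.
 */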
2196 if (unlikely(event & OHCI1394_cycleInconsistent)) {
2198 * We need to clear this event bit in order to make
2199 * cycleMatch isochronous I/O work. In theory we should
2200 * stop active cycleMatch iso contexts now and restart
2201 * them at least two cycles later. (FIXME?)
2203 if (printk_ratelimit())
2204 ohci_notice(ohci, "isochronous cycle inconsistent\n");
2207 if (unlikely(event & OHCI1394_unrecoverableError))
2208 handle_dead_contexts(ohci);
2210 if (event & OHCI1394_cycle64Seconds) {
2211 spin_lock(&ohci->lock);
2212 update_bus_time(ohci);
2213 spin_unlock(&ohci->lock);
2220 static int software_reset(struct fw_ohci *ohci)
2225 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2226 for (i = 0; i < 500; i++) {
2227 val = reg_read(ohci, OHCI1394_HCControlSet);
2229 return -ENODEV; /* Card was ejected. */
2231 if (!(val & OHCI1394_HCControl_softReset))
2240 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2242 size_t size = length * 4;
2244 memcpy(dest, src, size);
2245 if (size < CONFIG_ROM_SIZE)
2246 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
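/*
 * Note that length is in quadlets, not bytes. A hedged usage sketch:
 *
 *	copy_config_rom(ohci->next_config_rom, config_rom, length);
 *
 * copies length * 4 bytes and zero-fills the remainder of the
 * CONFIG_ROM_SIZE buffer.
 */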
2249 static int configure_1394a_enhancements(struct fw_ohci *ohci)
2252 int ret, clear, set, offset;
2254 /* Check if the driver should configure link and PHY. */
2255 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
2256 OHCI1394_HCControl_programPhyEnable))
2259 /* Paranoia: check whether the PHY supports 1394a, too. */
2260 enable_1394a = false;
2261 ret = read_phy_reg(ohci, 2);
2264 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2265 ret = read_paged_phy_reg(ohci, 1, 8);
2269 enable_1394a = true;
2272 if (ohci->quirks & QUIRK_NO_1394A)
2273 enable_1394a = false;
2275 /* Configure PHY and link consistently. */
2278 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2280 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2283 ret = update_phy_reg(ohci, 5, clear, set);
2288 offset = OHCI1394_HCControlSet;
2290 offset = OHCI1394_HCControlClear;
2291 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2293 /* Clean up: configuration has been taken care of. */
2294 reg_write(ohci, OHCI1394_HCControlClear,
2295 OHCI1394_HCControl_programPhyEnable);
2300 static int probe_tsb41ba3d(struct fw_ohci *ohci)
2302 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2303 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
2306 reg = read_phy_reg(ohci, 2);
2309 if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
2312 for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
2313 reg = read_paged_phy_reg(ohci, 1, i + 10);
2322 static int ohci_enable(struct fw_card *card,
2323 const __be32 *config_rom, size_t length)
2325 struct fw_ohci *ohci = fw_ohci(card);
2326 u32 lps, version, irqs;
2329 ret = software_reset(ohci);
2331 ohci_err(ohci, "failed to reset ohci card\n");
2336 * Now enable LPS, which we need in order to start accessing
2337 * most of the registers. In fact, on some cards (ALI M5251),
2338 * accessing registers in the SClk domain without LPS enabled
2339 * will lock up the machine. Wait 50msec to make sure we have
2340 * full link enabled. However, with some cards (well, at least
2341 * a JMicron PCIe card), we have to try again sometimes.
2343 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
2344 * cannot actually use the phy at that time. These need tens of
2345 * milliseconds pause between LPS write and first phy access too.
2348 reg_write(ohci, OHCI1394_HCControlSet,
2349 OHCI1394_HCControl_LPS |
2350 OHCI1394_HCControl_postedWriteEnable);
2353 for (lps = 0, i = 0; !lps && i < 3; i++) {
2355 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2356 OHCI1394_HCControl_LPS;
2360 ohci_err(ohci, "failed to set Link Power Status\n");
2364 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2365 ret = probe_tsb41ba3d(ohci);
2369 ohci_notice(ohci, "local TSB41BA3D phy\n");
2371 ohci->quirks &= ~QUIRK_TI_SLLZ059;
2374 reg_write(ohci, OHCI1394_HCControlClear,
2375 OHCI1394_HCControl_noByteSwapData);
2377 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2378 reg_write(ohci, OHCI1394_LinkControlSet,
2379 OHCI1394_LinkControl_cycleTimerEnable |
2380 OHCI1394_LinkControl_cycleMaster);
2382 reg_write(ohci, OHCI1394_ATRetries,
2383 OHCI1394_MAX_AT_REQ_RETRIES |
2384 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
2385 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2388 ohci->bus_time_running = false;
2390 for (i = 0; i < 32; i++)
2391 if (ohci->ir_context_support & (1 << i))
2392 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2393 IR_CONTEXT_MULTI_CHANNEL_MODE);
2395 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2396 if (version >= OHCI_VERSION_1_1) {
2397 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi, 0xfffffffe);
2399 card->broadcast_channel_auto_allocated = true;
2402 /* Get implemented bits of the priority arbitration request counter. */
2403 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2404 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2405 reg_write(ohci, OHCI1394_FairnessControl, 0);
2406 card->priority_budget_implemented = ohci->pri_req_max != 0;
2408 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2409 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2410 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2412 ret = configure_1394a_enhancements(ohci);
2416 /* Activate link_on bit and contender bit in our self ID packets. */
2417 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2422 * When the link is not yet enabled, the atomic config rom
2423 * update mechanism described below in ohci_set_config_rom()
2424 * is not active. We have to update ConfigRomHeader and
2425 * BusOptions manually, and the write to ConfigROMmap takes
2426 * effect immediately. We tie this to the enabling of the
2427 * link, so we have a valid config rom before enabling - the
2428 * OHCI requires that ConfigROMhdr and BusOptions have valid
2429 * values before enabling.
2431 * However, when the ConfigROMmap is written, some controllers
2432 * always read back quadlets 0 and 2 from the config rom to
2433 * the ConfigRomHeader and BusOptions registers on bus reset.
2434 * They shouldn't do that in this initial case where the link
2435 * isn't enabled. This means we have to use the same
2436 * workaround here, setting the bus header to 0 and then writing
2437 * the right values in the bus reset tasklet.
2441 ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2442 &ohci->next_config_rom_bus, GFP_KERNEL);
2443 if (ohci->next_config_rom == NULL)
2446 copy_config_rom(ohci->next_config_rom, config_rom, length);
2449 * In the suspend case, config_rom is NULL, which
2450 * means that we just reuse the old config rom.
2452 ohci->next_config_rom = ohci->config_rom;
2453 ohci->next_config_rom_bus = ohci->config_rom_bus;
2456 ohci->next_header = ohci->next_config_rom[0];
2457 ohci->next_config_rom[0] = 0;
2458 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2459 reg_write(ohci, OHCI1394_BusOptions,
2460 be32_to_cpu(ohci->next_config_rom[2]));
2461 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2463 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2465 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2466 OHCI1394_RQPkt | OHCI1394_RSPkt |
2467 OHCI1394_isochTx | OHCI1394_isochRx |
2468 OHCI1394_postedWriteErr |
2469 OHCI1394_selfIDComplete |
2470 OHCI1394_regAccessFail |
2471 OHCI1394_cycleInconsistent |
2472 OHCI1394_unrecoverableError |
2473 OHCI1394_cycleTooLong |
2474 OHCI1394_masterIntEnable | OHCI1394_busReset;
2476 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2478 reg_write(ohci, OHCI1394_HCControlSet,
2479 OHCI1394_HCControl_linkEnable |
2480 OHCI1394_HCControl_BIBimageValid);
2482 reg_write(ohci, OHCI1394_LinkControlSet,
2483 OHCI1394_LinkControl_rcvSelfID |
2484 OHCI1394_LinkControl_rcvPhyPkt);
2486 ar_context_run(&ohci->ar_request_ctx);
2487 ar_context_run(&ohci->ar_response_ctx);
2491 /* We are ready to go, reset bus to finish initialization. */
2492 fw_schedule_bus_reset(&ohci->card, false, true);
2497 static int ohci_set_config_rom(struct fw_card *card,
2498 const __be32 *config_rom, size_t length)
2500 struct fw_ohci *ohci;
2501 __be32 *next_config_rom;
2502 dma_addr_t next_config_rom_bus;
2504 ohci = fw_ohci(card);
2507 * When the OHCI controller is enabled, the config rom update
2508 * mechanism is a bit tricky, but easy enough to use. See
2509 * section 5.5.6 in the OHCI specification.
2511 * The OHCI controller caches the new config rom address in a
2512 * shadow register (ConfigROMmapNext) and needs a bus reset
2513 * for the changes to take place. When the bus reset is
2514 * detected, the controller loads the new values for the
2515 * ConfigRomHeader and BusOptions registers from the specified
2516 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2517 * shadow register. All automatically and atomically.
2519 * Now, there's a twist to this story. The automatic load of
2520 * ConfigRomHeader and BusOptions doesn't honor the
2521 * noByteSwapData bit, so with a be32 config rom, the
2522 * controller will load be32 values into these registers
2523 * during the atomic update, even on little endian
2524 * architectures. The workaround we use is to put a 0 in the
2525 * header quadlet; 0 is endian agnostic and means that the
2526 * config rom isn't ready yet. In the bus reset tasklet we
2527 * then set up the real values for the two registers.
2529 * We use ohci->lock to avoid racing with the code that sets
2530 * ohci->next_config_rom to NULL (see bus_reset_work).
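/*
 * A hedged sketch of the whole sequence, using names from this file:
 *
 *	1. dmam_alloc_coherent()            - allocate the new ROM image
 *	2. ohci->next_config_rom[0] = 0     - mark the image "not ready"
 *	3. reg_write(OHCI1394_ConfigROMmap) - arm the shadow register
 *	4. fw_schedule_bus_reset()          - controller swaps atomically
 *	5. bus_reset_work()                 - restore header, free old image
 */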
2533 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2534 &next_config_rom_bus, GFP_KERNEL);
2535 if (next_config_rom == NULL)
2538 spin_lock_irq(&ohci->lock);
2541 * If there is not an already pending config_rom update,
2542 * push our new allocation into the ohci->next_config_rom
2543 * and then mark the local variable as null so that we
2544 * won't deallocate the new buffer.
2546 * OTOH, if there is a pending config_rom update, just
2547 * use that buffer with the new config_rom data, and
2548 * let this routine free the unused DMA allocation.
2551 if (ohci->next_config_rom == NULL) {
2552 ohci->next_config_rom = next_config_rom;
2553 ohci->next_config_rom_bus = next_config_rom_bus;
2554 next_config_rom = NULL;
2557 copy_config_rom(ohci->next_config_rom, config_rom, length);
2559 ohci->next_header = config_rom[0];
2560 ohci->next_config_rom[0] = 0;
2562 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2564 spin_unlock_irq(&ohci->lock);
2566 /* If we didn't use the DMA allocation, delete it. */
2567 if (next_config_rom != NULL) {
2568 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
2569 next_config_rom_bus);
2573 * Now initiate a bus reset to have the changes take
2574 * effect. We clean up the old config rom memory and DMA
2575 * mappings in the bus reset tasklet, since the OHCI
2576 * controller could need to access it before the bus reset takes effect.
2580 fw_schedule_bus_reset(&ohci->card, true, true);
2585 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2587 struct fw_ohci *ohci = fw_ohci(card);
2589 at_context_transmit(&ohci->at_request_ctx, packet);
2592 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2594 struct fw_ohci *ohci = fw_ohci(card);
2596 at_context_transmit(&ohci->at_response_ctx, packet);
2599 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2601 struct fw_ohci *ohci = fw_ohci(card);
2602 struct context *ctx = &ohci->at_request_ctx;
2603 struct driver_data *driver_data = packet->driver_data;
2606 tasklet_disable_in_atomic(&ctx->tasklet);
2608 if (packet->ack != 0)
2611 if (packet->payload_mapped)
2612 dma_unmap_single(ohci->card.device, packet->payload_bus,
2613 packet->payload_length, DMA_TO_DEVICE);
2615 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2616 driver_data->packet = NULL;
2617 packet->ack = RCODE_CANCELLED;
2619 // Timestamping on behalf of the hardware.
2620 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
2622 packet->callback(packet, &ohci->card, packet->ack);
2625 tasklet_enable(&ctx->tasklet);
2630 static int ohci_enable_phys_dma(struct fw_card *card,
2631 int node_id, int generation)
2633 struct fw_ohci *ohci = fw_ohci(card);
2634 unsigned long flags;
2637 if (param_remote_dma)
2641 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2642 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2645 spin_lock_irqsave(&ohci->lock, flags);
2647 if (ohci->generation != generation) {
2653 * Note, if the node ID contains a non-local bus ID, physical DMA is
2654 * enabled for _all_ nodes on remote buses.
2657 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2659 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2661 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
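/*
 * Example (hedged): node 5 on the local bus sets bit 5 of the Lo
 * filter; node numbers 32..62 land in the Hi filter at bit n - 32;
 * a non-local bus ID maps to n = 63, covering all remote nodes.
 */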
2665 spin_unlock_irqrestore(&ohci->lock, flags);
2670 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2672 struct fw_ohci *ohci = fw_ohci(card);
2673 unsigned long flags;
2676 switch (csr_offset) {
2677 case CSR_STATE_CLEAR:
2679 if (ohci->is_root &&
2680 (reg_read(ohci, OHCI1394_LinkControlSet) &
2681 OHCI1394_LinkControl_cycleMaster))
2682 value = CSR_STATE_BIT_CMSTR;
2685 if (ohci->csr_state_setclear_abdicate)
2686 value |= CSR_STATE_BIT_ABDICATE;
2691 return reg_read(ohci, OHCI1394_NodeID) << 16;
2693 case CSR_CYCLE_TIME:
2694 return get_cycle_time(ohci);
2698 * We might be called just after the cycle timer has wrapped
2699 * around but just before the cycle64Seconds handler, so we
2700 * better check here, too, if the bus time needs to be updated.
2702 spin_lock_irqsave(&ohci->lock, flags);
2703 value = update_bus_time(ohci);
2704 spin_unlock_irqrestore(&ohci->lock, flags);
2707 case CSR_BUSY_TIMEOUT:
2708 value = reg_read(ohci, OHCI1394_ATRetries);
2709 return (value >> 4) & 0x0ffff00f;
2711 case CSR_PRIORITY_BUDGET:
2712 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2713 (ohci->pri_req_max << 8);
2721 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2723 struct fw_ohci *ohci = fw_ohci(card);
2724 unsigned long flags;
2726 switch (csr_offset) {
2727 case CSR_STATE_CLEAR:
2728 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2729 reg_write(ohci, OHCI1394_LinkControlClear,
2730 OHCI1394_LinkControl_cycleMaster);
2733 if (value & CSR_STATE_BIT_ABDICATE)
2734 ohci->csr_state_setclear_abdicate = false;
2738 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2739 reg_write(ohci, OHCI1394_LinkControlSet,
2740 OHCI1394_LinkControl_cycleMaster);
2743 if (value & CSR_STATE_BIT_ABDICATE)
2744 ohci->csr_state_setclear_abdicate = true;
2748 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2752 case CSR_CYCLE_TIME:
2753 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2754 reg_write(ohci, OHCI1394_IntEventSet,
2755 OHCI1394_cycleInconsistent);
2760 spin_lock_irqsave(&ohci->lock, flags);
2761 ohci->bus_time = (update_bus_time(ohci) & 0x40) |
2763 spin_unlock_irqrestore(&ohci->lock, flags);
2766 case CSR_BUSY_TIMEOUT:
2767 value = (value & 0xf) | ((value & 0xf) << 4) |
2768 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2769 reg_write(ohci, OHCI1394_ATRetries, value);
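/*
 * A hedged reading of the transform above: the 4-bit retry limit is
 * replicated into the three AT retry fields of ATRetries, and the
 * cycle limit is shifted up into its field.
 */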
2773 case CSR_PRIORITY_BUDGET:
2774 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2784 static void flush_iso_completions(struct iso_context *ctx)
2786 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2787 ctx->header_length, ctx->header,
2788 ctx->base.callback_data);
2789 ctx->header_length = 0;
2792 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
2796 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
2797 if (ctx->base.drop_overflow_headers)
2799 flush_iso_completions(ctx);
2802 ctx_hdr = ctx->header + ctx->header_length;
2803 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2806 * The two iso header quadlets are byteswapped to little
2807 * endian by the controller, but we want to present them
2808 * as big endian for consistency with the bus endianness.
2810 if (ctx->base.header_size > 0)
2811 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2812 if (ctx->base.header_size > 4)
2813 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2814 if (ctx->base.header_size > 8)
2815 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2816 ctx->header_length += ctx->base.header_size;
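/*
 * Resulting per-packet layout in ctx->header (hedged illustration for
 * header_size == 8):
 *
 *	ctx_hdr[0]: iso packet header, big endian
 *	ctx_hdr[1]: timestamp, big endian
 */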
2819 static int handle_ir_packet_per_buffer(struct context *context,
2820 struct descriptor *d,
2821 struct descriptor *last)
2823 struct iso_context *ctx =
2824 container_of(context, struct iso_context, context);
2825 struct descriptor *pd;
2828 for (pd = d; pd <= last; pd++)
2829 if (pd->transfer_status)
2832 /* Descriptor(s) not done yet, stop iteration */
2835 while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
2837 buffer_dma = le32_to_cpu(d->data_address);
2838 dma_sync_single_range_for_cpu(context->ohci->card.device,
2839 buffer_dma & PAGE_MASK,
2840 buffer_dma & ~PAGE_MASK,
2841 le16_to_cpu(d->req_count),
2845 copy_iso_headers(ctx, (u32 *) (last + 1));
2847 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2848 flush_iso_completions(ctx);
2853 /* d == last because each descriptor block is only a single descriptor. */
2854 static int handle_ir_buffer_fill(struct context *context,
2855 struct descriptor *d,
2856 struct descriptor *last)
2858 struct iso_context *ctx =
2859 container_of(context, struct iso_context, context);
2860 unsigned int req_count, res_count, completed;
2863 req_count = le16_to_cpu(last->req_count);
2864 res_count = le16_to_cpu(READ_ONCE(last->res_count));
2865 completed = req_count - res_count;
2866 buffer_dma = le32_to_cpu(last->data_address);
2868 if (completed > 0) {
2869 ctx->mc_buffer_bus = buffer_dma;
2870 ctx->mc_completed = completed;
2874 /* Descriptor(s) not done yet, stop iteration */
2877 dma_sync_single_range_for_cpu(context->ohci->card.device,
2878 buffer_dma & PAGE_MASK,
2879 buffer_dma & ~PAGE_MASK,
2880 completed, DMA_FROM_DEVICE);
2882 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2883 ctx->base.callback.mc(&ctx->base,
2884 buffer_dma + completed,
2885 ctx->base.callback_data);
2886 ctx->mc_completed = 0;
2892 static void flush_ir_buffer_fill(struct iso_context *ctx)
2894 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2895 ctx->mc_buffer_bus & PAGE_MASK,
2896 ctx->mc_buffer_bus & ~PAGE_MASK,
2897 ctx->mc_completed, DMA_FROM_DEVICE);
2899 ctx->base.callback.mc(&ctx->base,
2900 ctx->mc_buffer_bus + ctx->mc_completed,
2901 ctx->base.callback_data);
2902 ctx->mc_completed = 0;
2905 static inline void sync_it_packet_for_cpu(struct context *context,
2906 struct descriptor *pd)
2911 /* only packets beginning with OUTPUT_MORE* have data buffers */
2912 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2915 /* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
2919 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
2920 * data buffer is in the context program's coherent page and must not be synced.
2923 if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
2924 (context->current_bus & PAGE_MASK)) {
2925 if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
2931 buffer_dma = le32_to_cpu(pd->data_address);
2932 dma_sync_single_range_for_cpu(context->ohci->card.device,
2933 buffer_dma & PAGE_MASK,
2934 buffer_dma & ~PAGE_MASK,
2935 le16_to_cpu(pd->req_count),
2937 control = pd->control;
2939 } while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
2942 static int handle_it_packet(struct context *context,
2943 struct descriptor *d,
2944 struct descriptor *last)
2946 struct iso_context *ctx =
2947 container_of(context, struct iso_context, context);
2948 struct descriptor *pd;
2951 for (pd = d; pd <= last; pd++)
2952 if (pd->transfer_status)
2955 /* Descriptor(s) not done yet, stop iteration */
2958 sync_it_packet_for_cpu(context, d);
2960 if (ctx->header_length + 4 > PAGE_SIZE) {
2961 if (ctx->base.drop_overflow_headers)
2963 flush_iso_completions(ctx);
2966 ctx_hdr = ctx->header + ctx->header_length;
2967 ctx->last_timestamp = le16_to_cpu(last->res_count);
2968 /* Present this value as big-endian to match the receive code */
2969 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2970 le16_to_cpu(pd->res_count));
2971 ctx->header_length += 4;
2973 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
2974 flush_iso_completions(ctx);
2979 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2981 u32 hi = channels >> 32, lo = channels;
2983 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2984 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2985 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2986 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2987 ohci->mc_channels = channels;
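/*
 * The Clear registers are written first so that channels being dropped
 * stop matching before new ones are enabled. A hedged usage example,
 * listening on channels 0 and 5 only:
 *
 *	set_multichannel_mask(ohci, (1ULL << 0) | (1ULL << 5));
 */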
2990 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2991 int type, int channel, size_t header_size)
2993 struct fw_ohci *ohci = fw_ohci(card);
2994 struct iso_context *ctx;
2995 descriptor_callback_t callback;
2998 int index, ret = -EBUSY;
3000 spin_lock_irq(&ohci->lock);
3003 case FW_ISO_CONTEXT_TRANSMIT:
3004 mask = &ohci->it_context_mask;
3005 callback = handle_it_packet;
3006 index = ffs(*mask) - 1;
3008 *mask &= ~(1 << index);
3009 regs = OHCI1394_IsoXmitContextBase(index);
3010 ctx = &ohci->it_context_list[index];
3014 case FW_ISO_CONTEXT_RECEIVE:
3015 channels = &ohci->ir_context_channels;
3016 mask = &ohci->ir_context_mask;
3017 callback = handle_ir_packet_per_buffer;
3018 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
3020 *channels &= ~(1ULL << channel);
3021 *mask &= ~(1 << index);
3022 regs = OHCI1394_IsoRcvContextBase(index);
3023 ctx = &ohci->ir_context_list[index];
3027 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3028 mask = &ohci->ir_context_mask;
3029 callback = handle_ir_buffer_fill;
3030 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
3032 ohci->mc_allocated = true;
3033 *mask &= ~(1 << index);
3034 regs = OHCI1394_IsoRcvContextBase(index);
3035 ctx = &ohci->ir_context_list[index];
3044 spin_unlock_irq(&ohci->lock);
3047 return ERR_PTR(ret);
3049 memset(ctx, 0, sizeof(*ctx));
3050 ctx->header_length = 0;
3051 ctx->header = (void *) __get_free_page(GFP_KERNEL);
3052 if (ctx->header == NULL) {
3056 ret = context_init(&ctx->context, ohci, regs, callback);
3058 goto out_with_header;
3060 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
3061 set_multichannel_mask(ohci, 0);
3062 ctx->mc_completed = 0;
3068 free_page((unsigned long)ctx->header);
3070 spin_lock_irq(&ohci->lock);
3073 case FW_ISO_CONTEXT_RECEIVE:
3074 *channels |= 1ULL << channel;
3077 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3078 ohci->mc_allocated = false;
3081 *mask |= 1 << index;
3083 spin_unlock_irq(&ohci->lock);
3085 return ERR_PTR(ret);
3088 static int ohci_start_iso(struct fw_iso_context *base,
3089 s32 cycle, u32 sync, u32 tags)
3091 struct iso_context *ctx = container_of(base, struct iso_context, base);
3092 struct fw_ohci *ohci = ctx->context.ohci;
3093 u32 control = IR_CONTEXT_ISOCH_HEADER, match;
3096 /* the controller cannot start without any queued packets */
3097 if (ctx->context.last->branch_address == 0)
3100 switch (ctx->base.type) {
3101 case FW_ISO_CONTEXT_TRANSMIT:
3102 index = ctx - ohci->it_context_list;
3105 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
3106 (cycle & 0x7fff) << 16;
3108 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3109 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
3110 context_run(&ctx->context, match);
3113 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3114 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
3116 case FW_ISO_CONTEXT_RECEIVE:
3117 index = ctx - ohci->ir_context_list;
3118 match = (tags << 28) | (sync << 8) | ctx->base.channel;
3120 match |= (cycle & 0x07fff) << 12;
3121 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
3124 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3125 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3126 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
3127 context_run(&ctx->context, control);
3138 static int ohci_stop_iso(struct fw_iso_context *base)
3140 struct fw_ohci *ohci = fw_ohci(base->card);
3141 struct iso_context *ctx = container_of(base, struct iso_context, base);
3144 switch (ctx->base.type) {
3145 case FW_ISO_CONTEXT_TRANSMIT:
3146 index = ctx - ohci->it_context_list;
3147 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
3150 case FW_ISO_CONTEXT_RECEIVE:
3151 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3152 index = ctx - ohci->ir_context_list;
3153 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
3157 context_stop(&ctx->context);
3158 tasklet_kill(&ctx->context.tasklet);
3163 static void ohci_free_iso_context(struct fw_iso_context *base)
3165 struct fw_ohci *ohci = fw_ohci(base->card);
3166 struct iso_context *ctx = container_of(base, struct iso_context, base);
3167 unsigned long flags;
3170 ohci_stop_iso(base);
3171 context_release(&ctx->context);
3172 free_page((unsigned long)ctx->header);
3174 spin_lock_irqsave(&ohci->lock, flags);
3176 switch (base->type) {
3177 case FW_ISO_CONTEXT_TRANSMIT:
3178 index = ctx - ohci->it_context_list;
3179 ohci->it_context_mask |= 1 << index;
3182 case FW_ISO_CONTEXT_RECEIVE:
3183 index = ctx - ohci->ir_context_list;
3184 ohci->ir_context_mask |= 1 << index;
3185 ohci->ir_context_channels |= 1ULL << base->channel;
3188 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3189 index = ctx - ohci->ir_context_list;
3190 ohci->ir_context_mask |= 1 << index;
3191 ohci->ir_context_channels |= ohci->mc_channels;
3192 ohci->mc_channels = 0;
3193 ohci->mc_allocated = false;
3197 spin_unlock_irqrestore(&ohci->lock, flags);
3200 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3202 struct fw_ohci *ohci = fw_ohci(base->card);
3203 unsigned long flags;
3206 switch (base->type) {
3207 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3209 spin_lock_irqsave(&ohci->lock, flags);
3211 /* Don't allow multichannel to grab other contexts' channels. */
3212 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
3213 *channels = ohci->ir_context_channels;
3216 set_multichannel_mask(ohci, *channels);
3220 spin_unlock_irqrestore(&ohci->lock, flags);
3231 static void ohci_resume_iso_dma(struct fw_ohci *ohci)
3234 struct iso_context *ctx;
3236 for (i = 0 ; i < ohci->n_ir ; i++) {
3237 ctx = &ohci->ir_context_list[i];
3238 if (ctx->context.running)
3239 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3242 for (i = 0 ; i < ohci->n_it ; i++) {
3243 ctx = &ohci->it_context_list[i];
3244 if (ctx->context.running)
3245 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3250 static int queue_iso_transmit(struct iso_context *ctx,
3251 struct fw_iso_packet *packet,
3252 struct fw_iso_buffer *buffer,
3253 unsigned long payload)
3255 struct descriptor *d, *last, *pd;
3256 struct fw_iso_packet *p;
3258 dma_addr_t d_bus, page_bus;
3259 u32 z, header_z, payload_z, irq;
3260 u32 payload_index, payload_end_index, next_page_index;
3261 int page, end_page, i, length, offset;
3264 payload_index = payload;
3270 if (p->header_length > 0)
3273 /* Determine the first page past the end of the payload. */
3274 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3275 if (p->payload_length > 0)
3276 payload_z = end_page - (payload_index >> PAGE_SHIFT);
3282 /* Get header size in number of descriptors. */
3283 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
3285 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
3290 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
3291 d[0].req_count = cpu_to_le16(8);
3293 * Link the skip address to this descriptor itself. This causes
3294 * a context to skip a cycle whenever lost cycles or FIFO
3295 * overruns occur, without dropping the data. The application
3296 * should then decide whether this is an error condition or not.
3297 * FIXME: Make the context's cycle-lost behaviour configurable?
3299 d[0].branch_address = cpu_to_le32(d_bus | z);
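/*
 * OHCI branch/skip addresses carry the 16-byte-aligned block address
 * in the upper bits and the block size Z in the low four bits, so
 * d_bus | z above is a skip address pointing back at this very block
 * (hedged restatement of the mechanism described above).
 */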
3301 header = (__le32 *) &d[1];
3302 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
3303 IT_HEADER_TAG(p->tag) |
3304 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
3305 IT_HEADER_CHANNEL(ctx->base.channel) |
3306 IT_HEADER_SPEED(ctx->base.speed));
3308 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
3309 p->payload_length));
3312 if (p->header_length > 0) {
3313 d[2].req_count = cpu_to_le16(p->header_length);
3314 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
3315 memcpy(&d[z], p->header, p->header_length);
3318 pd = d + z - payload_z;
3319 payload_end_index = payload_index + p->payload_length;
3320 for (i = 0; i < payload_z; i++) {
3321 page = payload_index >> PAGE_SHIFT;
3322 offset = payload_index & ~PAGE_MASK;
3323 next_page_index = (page + 1) << PAGE_SHIFT;
3325 min(next_page_index, payload_end_index) - payload_index;
3326 pd[i].req_count = cpu_to_le16(length);
3328 page_bus = page_private(buffer->pages[page]);
3329 pd[i].data_address = cpu_to_le32(page_bus + offset);
3331 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3332 page_bus, offset, length,
3335 payload_index += length;
3339 irq = DESCRIPTOR_IRQ_ALWAYS;
3341 irq = DESCRIPTOR_NO_IRQ;
3343 last = z == 2 ? d : d + z - 1;
3344 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
3346 DESCRIPTOR_BRANCH_ALWAYS |
3349 context_append(&ctx->context, d, z, header_z);
3354 static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3355 struct fw_iso_packet *packet,
3356 struct fw_iso_buffer *buffer,
3357 unsigned long payload)
3359 struct device *device = ctx->context.ohci->card.device;
3360 struct descriptor *d, *pd;
3361 dma_addr_t d_bus, page_bus;
3362 u32 z, header_z, rest;
3364 int page, offset, packet_count, header_size, payload_per_buffer;
3367 * The OHCI controller puts the isochronous header and trailer in the
3368 * buffer, so we need at least 8 bytes.
3370 packet_count = packet->header_length / ctx->base.header_size;
3371 header_size = max(ctx->base.header_size, (size_t)8);
3373 /* Get header size in number of descriptors. */
3374 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3375 page = payload >> PAGE_SHIFT;
3376 offset = payload & ~PAGE_MASK;
3377 payload_per_buffer = packet->payload_length / packet_count;
3379 for (i = 0; i < packet_count; i++) {
3380 /* d points to the header descriptor */
3381 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3382 d = context_get_descriptors(&ctx->context,
3383 z + header_z, &d_bus);
3387 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
3388 DESCRIPTOR_INPUT_MORE);
3389 if (packet->skip && i == 0)
3390 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3391 d->req_count = cpu_to_le16(header_size);
3392 d->res_count = d->req_count;
3393 d->transfer_status = 0;
3394 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3396 rest = payload_per_buffer;
3398 for (j = 1; j < z; j++) {
3400 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3401 DESCRIPTOR_INPUT_MORE);
3403 if (offset + rest < PAGE_SIZE)
3406 length = PAGE_SIZE - offset;
3407 pd->req_count = cpu_to_le16(length);
3408 pd->res_count = pd->req_count;
3409 pd->transfer_status = 0;
3411 page_bus = page_private(buffer->pages[page]);
3412 pd->data_address = cpu_to_le32(page_bus + offset);
3414 dma_sync_single_range_for_device(device, page_bus,
3418 offset = (offset + length) & ~PAGE_MASK;
3423 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3424 DESCRIPTOR_INPUT_LAST |
3425 DESCRIPTOR_BRANCH_ALWAYS);
3426 if (packet->interrupt && i == packet_count - 1)
3427 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3429 context_append(&ctx->context, d, z, header_z);
3435 static int queue_iso_buffer_fill(struct iso_context *ctx,
3436 struct fw_iso_packet *packet,
3437 struct fw_iso_buffer *buffer,
3438 unsigned long payload)
3440 struct descriptor *d;
3441 dma_addr_t d_bus, page_bus;
3442 int page, offset, rest, z, i, length;
3444 page = payload >> PAGE_SHIFT;
3445 offset = payload & ~PAGE_MASK;
3446 rest = packet->payload_length;
3448 /* We need one descriptor for each page in the buffer. */
3449 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
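/*
 * Hedged example: a 10 KiB payload starting at offset 1024 into a
 * 4 KiB page needs DIV_ROUND_UP(1024 + 10240, 4096) = 3 descriptors.
 */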
3451 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3454 for (i = 0; i < z; i++) {
3455 d = context_get_descriptors(&ctx->context, 1, &d_bus);
3459 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3460 DESCRIPTOR_BRANCH_ALWAYS);
3461 if (packet->skip && i == 0)
3462 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3463 if (packet->interrupt && i == z - 1)
3464 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3466 if (offset + rest < PAGE_SIZE)
3469 length = PAGE_SIZE - offset;
3470 d->req_count = cpu_to_le16(length);
3471 d->res_count = d->req_count;
3472 d->transfer_status = 0;
3474 page_bus = page_private(buffer->pages[page]);
3475 d->data_address = cpu_to_le32(page_bus + offset);
3477 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
3478 page_bus, offset, length,
3485 context_append(&ctx->context, d, 1, 0);
3491 static int ohci_queue_iso(struct fw_iso_context *base,
3492 struct fw_iso_packet *packet,
3493 struct fw_iso_buffer *buffer,
3494 unsigned long payload)
3496 struct iso_context *ctx = container_of(base, struct iso_context, base);
3497 unsigned long flags;
3500 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
3501 switch (base->type) {
3502 case FW_ISO_CONTEXT_TRANSMIT:
3503 ret = queue_iso_transmit(ctx, packet, buffer, payload);
3505 case FW_ISO_CONTEXT_RECEIVE:
3506 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3508 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3509 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
3512 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
3517 static void ohci_flush_queue_iso(struct fw_iso_context *base)
3519 struct context *ctx =
3520 &container_of(base, struct iso_context, base)->context;
3522 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
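/*
 * Writing CONTEXT_WAKE makes the controller re-fetch the branch address
 * of the descriptor it stopped at, so packets appended after the
 * context went idle are picked up without a full stop/start cycle.
 */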
3525 static int ohci_flush_iso_completions(struct fw_iso_context *base)
3527 struct iso_context *ctx = container_of(base, struct iso_context, base);
3530 tasklet_disable_in_atomic(&ctx->context.tasklet);
3532 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3533 context_tasklet((unsigned long)&ctx->context);
3535 switch (base->type) {
3536 case FW_ISO_CONTEXT_TRANSMIT:
3537 case FW_ISO_CONTEXT_RECEIVE:
3538 if (ctx->header_length != 0)
3539 flush_iso_completions(ctx);
3541 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3542 if (ctx->mc_completed != 0)
3543 flush_ir_buffer_fill(ctx);
3549 clear_bit_unlock(0, &ctx->flushing_completions);
3550 smp_mb__after_atomic();
3553 tasklet_enable(&ctx->context.tasklet);
3558 static const struct fw_card_driver ohci_driver = {
3559 .enable = ohci_enable,
3560 .read_phy_reg = ohci_read_phy_reg,
3561 .update_phy_reg = ohci_update_phy_reg,
3562 .set_config_rom = ohci_set_config_rom,
3563 .send_request = ohci_send_request,
3564 .send_response = ohci_send_response,
3565 .cancel_packet = ohci_cancel_packet,
3566 .enable_phys_dma = ohci_enable_phys_dma,
3567 .read_csr = ohci_read_csr,
3568 .write_csr = ohci_write_csr,
3570 .allocate_iso_context = ohci_allocate_iso_context,
3571 .free_iso_context = ohci_free_iso_context,
3572 .set_iso_channels = ohci_set_iso_channels,
3573 .queue_iso = ohci_queue_iso,
3574 .flush_queue_iso = ohci_flush_queue_iso,
3575 .flush_iso_completions = ohci_flush_iso_completions,
3576 .start_iso = ohci_start_iso,
3577 .stop_iso = ohci_stop_iso,
3580 #ifdef CONFIG_PPC_PMAC
3581 static void pmac_ohci_on(struct pci_dev *dev)
3583 if (machine_is(powermac)) {
3584 struct device_node *ofn = pci_device_to_OF_node(dev);
3587 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3588 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3593 static void pmac_ohci_off(struct pci_dev *dev)
3595 if (machine_is(powermac)) {
3596 struct device_node *ofn = pci_device_to_OF_node(dev);
3599 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3600 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3605 static inline void pmac_ohci_on(struct pci_dev *dev) {}
3606 static inline void pmac_ohci_off(struct pci_dev *dev) {}
3607 #endif /* CONFIG_PPC_PMAC */
3609 static void release_ohci(struct device *dev, void *data)
3611 struct pci_dev *pdev = to_pci_dev(dev);
3612 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3614 pmac_ohci_off(pdev);
3616 ar_context_release(&ohci->ar_response_ctx);
3617 ar_context_release(&ohci->ar_request_ctx);
3619 dev_notice(dev, "removed fw-ohci device\n");
3622 static int pci_probe(struct pci_dev *dev,
3623 const struct pci_device_id *ent)
3625 struct fw_ohci *ohci;
3626 u32 bus_options, max_receive, link_speed, version;
3628 int i, flags, irq, err;
3631 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
3632 dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
3636 ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
3639 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3640 pci_set_drvdata(dev, ohci);
3642 devres_add(&dev->dev, ohci);
3644 err = pcim_enable_device(dev);
3646 dev_err(&dev->dev, "failed to enable OHCI hardware\n");
3650 pci_set_master(dev);
3651 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3653 spin_lock_init(&ohci->lock);
3654 mutex_init(&ohci->phy_reg_mutex);
3656 INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3658 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3659 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
3660 ohci_err(ohci, "invalid MMIO resource\n");
3664 err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
3666 ohci_err(ohci, "request and map MMIO resource unavailable\n");
3669 ohci->registers = pcim_iomap_table(dev)[0];
3671 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
3672 if ((ohci_quirks[i].vendor == dev->vendor) &&
3673 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3674 ohci_quirks[i].device == dev->device) &&
3675 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3676 ohci_quirks[i].revision >= dev->revision)) {
3677 ohci->quirks = ohci_quirks[i].flags;
3681 ohci->quirks = param_quirks;
3683 if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
3684 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
3687 * Because dma_alloc_coherent() allocates at least one page,
3688 * we save space by using a common buffer for the AR request/
3689 * response descriptors and the self IDs buffer.
3691 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3692 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
3693 ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
3695 if (!ohci->misc_buffer)
3698 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
3699 OHCI1394_AsReqRcvContextControlSet);
3703 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
3704 OHCI1394_AsRspRcvContextControlSet);
3708 err = context_init(&ohci->at_request_ctx, ohci,
3709 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
3713 err = context_init(&ohci->at_response_ctx, ohci,
3714 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
3718 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3719 ohci->ir_context_channels = ~0ULL;
3720 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3721 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3722 ohci->ir_context_mask = ohci->ir_context_support;
3723 ohci->n_ir = hweight32(ohci->ir_context_mask);
3724 size = sizeof(struct iso_context) * ohci->n_ir;
3725 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3726 if (!ohci->ir_context_list)
3729 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3730 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3731 /* JMicron JMB38x often shows 0 at first read, just ignore it */
3732 if (!ohci->it_context_support) {
3733 ohci_notice(ohci, "overriding IsoXmitIntMask\n");
3734 ohci->it_context_support = 0xf;
3736 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3737 ohci->it_context_mask = ohci->it_context_support;
3738 ohci->n_it = hweight32(ohci->it_context_mask);
3739 size = sizeof(struct iso_context) * ohci->n_it;
3740 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
3741 if (!ohci->it_context_list)
3744 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
3745 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
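/*
 * Hedged sketch of how the shared page is carved up, per the offsets
 * used above:
 *
 *	[0,            PAGE_SIZE/4)  AR request descriptors
 *	[PAGE_SIZE/4,  PAGE_SIZE/2)  AR response descriptors
 *	[PAGE_SIZE/2,  PAGE_SIZE)    self ID receive buffer
 */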
3747 bus_options = reg_read(ohci, OHCI1394_BusOptions);
3748 max_receive = (bus_options >> 12) & 0xf;
3749 link_speed = bus_options & 0x7;
3750 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3751 reg_read(ohci, OHCI1394_GUIDLo);
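/*
 * Hedged summary of the BusOptions fields read above: bits 15..12 hold
 * max_rec, so asynchronous payloads of up to 2^(max_receive + 1) bytes
 * are accepted; bits 2..0 hold the link speed code.
 */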
3753 flags = PCI_IRQ_INTX;
3754 if (!(ohci->quirks & QUIRK_NO_MSI))
3755 flags |= PCI_IRQ_MSI;
3756 err = pci_alloc_irq_vectors(dev, 1, 1, flags);
3759 irq = pci_irq_vector(dev, 0);
3765 err = request_threaded_irq(irq, irq_handler, NULL,
3766 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name,
3769 ohci_err(ohci, "failed to allocate interrupt %d\n", irq);
3773 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
3777 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3779 "added OHCI v%x.%x device as card %d, "
3780 "%d IR + %d IT contexts, quirks 0x%x%s\n",
3781 version >> 16, version & 0xff, ohci->card.index,
3782 ohci->n_ir, ohci->n_it, ohci->quirks,
3783 reg_read(ohci, OHCI1394_PhyUpperBound) ?
3789 free_irq(irq, ohci);
3791 pci_free_irq_vectors(dev);
3796 static void pci_remove(struct pci_dev *dev)
3798 struct fw_ohci *ohci = pci_get_drvdata(dev);
3802 * If the removal is happening from the suspend state, LPS won't be
3803 * enabled and host registers (e.g., IntMaskClear) won't be accessible.
3805 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
3806 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3809 cancel_work_sync(&ohci->bus_reset_work);
3810 fw_core_remove_card(&ohci->card);
3813 * FIXME: Fail all pending packets here, now that the upper
3814 * layers can't queue any more.
3817 software_reset(ohci);
3819 irq = pci_irq_vector(dev, 0);
3821 free_irq(irq, ohci);
3822 pci_free_irq_vectors(dev);
3824 dev_notice(&dev->dev, "removing fw-ohci device\n");
3828 static int pci_suspend(struct pci_dev *dev, pm_message_t state)
3830 struct fw_ohci *ohci = pci_get_drvdata(dev);
3833 software_reset(ohci);
3834 err = pci_save_state(dev);
3836 ohci_err(ohci, "pci_save_state failed\n");
3839 err = pci_set_power_state(dev, pci_choose_state(dev, state));
3841 ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
3847 static int pci_resume(struct pci_dev *dev)
3849 struct fw_ohci *ohci = pci_get_drvdata(dev);
3853 pci_set_power_state(dev, PCI_D0);
3854 pci_restore_state(dev);
3855 err = pci_enable_device(dev);
3857 ohci_err(ohci, "pci_enable_device failed\n");
3861 /* Some systems don't set up the GUID register on resume from RAM */
3862 if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3863 !reg_read(ohci, OHCI1394_GUIDHi)) {
3864 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3865 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3868 err = ohci_enable(&ohci->card, NULL, 0);
3872 ohci_resume_iso_dma(ohci);
3878 static const struct pci_device_id pci_table[] = {
3879 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
3883 MODULE_DEVICE_TABLE(pci, pci_table);
3885 static struct pci_driver fw_ohci_pci_driver = {
3886 .name = ohci_driver_name,
3887 .id_table = pci_table,
3889 .remove = pci_remove,
3891 .resume = pci_resume,
3892 .suspend = pci_suspend,
3896 static int __init fw_ohci_init(void)
3898 selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
3899 if (!selfid_workqueue)
3902 return pci_register_driver(&fw_ohci_pci_driver);
3905 static void __exit fw_ohci_cleanup(void)
3907 pci_unregister_driver(&fw_ohci_pci_driver);
3908 destroy_workqueue(selfid_workqueue);
3911 module_init(fw_ohci_init);
3912 module_exit(fw_ohci_cleanup);
3914 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3915 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3916 MODULE_LICENSE("GPL");
3918 /* Provide a module alias so root-on-sbp2 initrds don't break. */
3919 MODULE_ALIAS("ohci1394");