1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for OHCI 1394 controllers
5 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
8 #include <linux/bitops.h>
10 #include <linux/compiler.h>
11 #include <linux/delay.h>
12 #include <linux/device.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/firewire.h>
15 #include <linux/firewire-constants.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
22 #include <linux/module.h>
23 #include <linux/moduleparam.h>
24 #include <linux/mutex.h>
25 #include <linux/pci.h>
26 #include <linux/pci_ids.h>
27 #include <linux/slab.h>
28 #include <linux/spinlock.h>
29 #include <linux/string.h>
30 #include <linux/time.h>
31 #include <linux/vmalloc.h>
32 #include <linux/workqueue.h>
34 #include <asm/byteorder.h>
37 #ifdef CONFIG_PPC_PMAC
38 #include <asm/pmac_feature.h>
44 #define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args)
45 #define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args)
46 #define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args)
48 #define DESCRIPTOR_OUTPUT_MORE 0
49 #define DESCRIPTOR_OUTPUT_LAST (1 << 12)
50 #define DESCRIPTOR_INPUT_MORE (2 << 12)
51 #define DESCRIPTOR_INPUT_LAST (3 << 12)
52 #define DESCRIPTOR_STATUS (1 << 11)
53 #define DESCRIPTOR_KEY_IMMEDIATE (2 << 8)
54 #define DESCRIPTOR_PING (1 << 7)
55 #define DESCRIPTOR_YY (1 << 6)
56 #define DESCRIPTOR_NO_IRQ (0 << 4)
57 #define DESCRIPTOR_IRQ_ERROR (1 << 4)
58 #define DESCRIPTOR_IRQ_ALWAYS (3 << 4)
59 #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2)
60 #define DESCRIPTOR_WAIT (3 << 0)
62 #define DESCRIPTOR_CMD (0xf << 12)
64 struct descriptor {
65 __le16 req_count;
66 __le16 control;
67 __le32 data_address;
68 __le32 branch_address;
69 __le16 res_count;
70 __le16 transfer_status;
71 } __attribute__((aligned(16)));
73 #define CONTROL_SET(regs) (regs)
74 #define CONTROL_CLEAR(regs) ((regs) + 4)
75 #define COMMAND_PTR(regs) ((regs) + 12)
76 #define CONTEXT_MATCH(regs) ((regs) + 16)
78 #define AR_BUFFER_SIZE (32*1024)
79 #define AR_BUFFERS_MIN DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
80 /* we need at least two pages for proper list management */
81 #define AR_BUFFERS (AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)
83 #define MAX_ASYNC_PAYLOAD 4096
84 #define MAX_AR_PACKET_SIZE (16 + MAX_ASYNC_PAYLOAD + 4)
85 #define AR_WRAPAROUND_PAGES DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
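/*
 * A worked example of the sizing above, assuming 4 KiB pages (the values
 * are page-size dependent): AR_BUFFERS_MIN = 32768 / 4096 = 8, so
 * AR_BUFFERS = 8; MAX_AR_PACKET_SIZE = 16 + 4096 + 4 = 4116 bytes, so a
 * maximal packet can straddle up to three pages and AR_WRAPAROUND_PAGES =
 * DIV_ROUND_UP(4116, 4096) = 2 extra page mappings are needed at the end
 * of the vmapped ring (see ar_context_init() below).
 */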
89 struct page *pages[AR_BUFFERS];
91 struct descriptor *descriptors;
92 dma_addr_t descriptors_bus;
94 unsigned int last_buffer_index;
96 struct tasklet_struct tasklet;
101 typedef int (*descriptor_callback_t)(struct context *ctx,
102 struct descriptor *d,
103 struct descriptor *last);
106 * A buffer that contains a block of DMA-able coherent memory used for
107 * storing a portion of a DMA descriptor program.
109 struct descriptor_buffer {
110 struct list_head list;
111 dma_addr_t buffer_bus;
114 struct descriptor buffer[];
118 struct fw_ohci *ohci;
120 int total_allocation;
126 * List of page-sized buffers for storing DMA descriptors.
127 * Head of list contains buffers in use and tail of list contains
128 * free buffers.
130 struct list_head buffer_list;
133 * Pointer to a buffer inside buffer_list that contains the tail
134 * end of the current DMA program.
136 struct descriptor_buffer *buffer_tail;
139 * The descriptor containing the branch address of the first
140 * descriptor that has not yet been filled by the device.
142 struct descriptor *last;
145 * The last descriptor block in the DMA program. It contains the branch
146 * address that must be updated upon appending a new descriptor.
148 struct descriptor *prev;
151 descriptor_callback_t callback;
153 struct tasklet_struct tasklet;
156 #define IT_HEADER_SY(v) ((v) << 0)
157 #define IT_HEADER_TCODE(v) ((v) << 4)
158 #define IT_HEADER_CHANNEL(v) ((v) << 8)
159 #define IT_HEADER_TAG(v) ((v) << 14)
160 #define IT_HEADER_SPEED(v) ((v) << 16)
161 #define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
164 struct fw_iso_context base;
165 struct context context;
167 size_t header_length;
168 unsigned long flushing_completions;
176 #define CONFIG_ROM_SIZE 1024
181 __iomem char *registers;
184 int request_generation; /* for timestamping incoming requests */
186 unsigned int pri_req_max;
188 bool bus_time_running;
190 bool csr_state_setclear_abdicate;
194 * Spinlock for accessing fw_ohci data. Never call out of
195 * this driver with this lock held.
199 struct mutex phy_reg_mutex;
202 dma_addr_t misc_buffer_bus;
204 struct ar_context ar_request_ctx;
205 struct ar_context ar_response_ctx;
206 struct context at_request_ctx;
207 struct context at_response_ctx;
209 u32 it_context_support;
210 u32 it_context_mask; /* unoccupied IT contexts */
211 struct iso_context *it_context_list;
212 u64 ir_context_channels; /* unoccupied channels */
213 u32 ir_context_support;
214 u32 ir_context_mask; /* unoccupied IR contexts */
215 struct iso_context *ir_context_list;
216 u64 mc_channels; /* channels in use by the multichannel IR context */
220 dma_addr_t config_rom_bus;
221 __be32 *next_config_rom;
222 dma_addr_t next_config_rom_bus;
226 dma_addr_t self_id_bus;
227 struct work_struct bus_reset_work;
229 u32 self_id_buffer[512];
232 static struct workqueue_struct *selfid_workqueue;
234 static inline struct fw_ohci *fw_ohci(struct fw_card *card)
236 return container_of(card, struct fw_ohci, card);
239 #define IT_CONTEXT_CYCLE_MATCH_ENABLE 0x80000000
240 #define IR_CONTEXT_BUFFER_FILL 0x80000000
241 #define IR_CONTEXT_ISOCH_HEADER 0x40000000
242 #define IR_CONTEXT_CYCLE_MATCH_ENABLE 0x20000000
243 #define IR_CONTEXT_MULTI_CHANNEL_MODE 0x10000000
244 #define IR_CONTEXT_DUAL_BUFFER_MODE 0x08000000
246 #define CONTEXT_RUN 0x8000
247 #define CONTEXT_WAKE 0x1000
248 #define CONTEXT_DEAD 0x0800
249 #define CONTEXT_ACTIVE 0x0400
251 #define OHCI1394_MAX_AT_REQ_RETRIES 0xf
252 #define OHCI1394_MAX_AT_RESP_RETRIES 0x2
253 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8
255 #define OHCI1394_REGISTER_SIZE 0x800
256 #define OHCI1394_PCI_HCI_Control 0x40
257 #define SELF_ID_BUF_SIZE 0x800
258 #define OHCI_TCODE_PHY_PACKET 0x0e
259 #define OHCI_VERSION_1_1 0x010010
261 static char ohci_driver_name[] = KBUILD_MODNAME;
263 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
264 #define PCI_DEVICE_ID_AGERE_FW643 0x5901
265 #define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
266 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
267 #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
268 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
269 #define PCI_DEVICE_ID_TI_TSB82AA2 0x8025
270 #define PCI_DEVICE_ID_VIA_VT630X 0x3044
271 #define PCI_REV_ID_VIA_VT6306 0x46
272 #define PCI_DEVICE_ID_VIA_VT6315 0x3403
274 #define QUIRK_CYCLE_TIMER 0x1
275 #define QUIRK_RESET_PACKET 0x2
276 #define QUIRK_BE_HEADERS 0x4
277 #define QUIRK_NO_1394A 0x8
278 #define QUIRK_NO_MSI 0x10
279 #define QUIRK_TI_SLLZ059 0x20
280 #define QUIRK_IR_WAKE 0x40
282 // On the PCI Express Root Complex of any type of AMD Ryzen machine, VIA VT6306/6307/6308 behind an
283 // Asmedia ASM1083/1085 bridge has the inconvenient behaviour that read accesses to the 'Isochronous
284 // Cycle Timer' register (at offset 0xf0 in PCI I/O space) often cause an unexpected system reboot.
285 // The mechanism is not clear, since reads of the other registers (e.g. the 'Node ID' register) are
286 // safe enough; it is probably due to the detection of some type of PCIe error.
287 #define QUIRK_REBOOT_BY_CYCLE_TIMER_READ 0x80000000
289 #if IS_ENABLED(CONFIG_X86)
291 static bool has_reboot_by_cycle_timer_read_quirk(const struct fw_ohci *ohci)
293 return !!(ohci->quirks & QUIRK_REBOOT_BY_CYCLE_TIMER_READ);
296 #define PCI_DEVICE_ID_ASMEDIA_ASM108X 0x1080
298 static bool detect_vt630x_with_asm1083_on_amd_ryzen_machine(const struct pci_dev *pdev)
300 const struct pci_dev *pcie_to_pci_bridge;
302 // Detect any type of AMD Ryzen machine.
303 if (!static_cpu_has(X86_FEATURE_ZEN))
306 // Detect VIA VT6306/6307/6308.
307 if (pdev->vendor != PCI_VENDOR_ID_VIA)
309 if (pdev->device != PCI_DEVICE_ID_VIA_VT630X)
312 // Detect Asmedia ASM1083/1085.
313 pcie_to_pci_bridge = pdev->bus->self;
314 if (pcie_to_pci_bridge->vendor != PCI_VENDOR_ID_ASMEDIA)
316 if (pcie_to_pci_bridge->device != PCI_DEVICE_ID_ASMEDIA_ASM108X)
323 #define has_reboot_by_cycle_timer_read_quirk(ohci) false
324 #define detect_vt630x_with_asm1083_on_amd_ryzen_machine(pdev) false
327 /* In case of multiple matches in ohci_quirks[], only the first one is used. */
328 static const struct {
329 unsigned short vendor, device, revision, flags;
331 {PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
334 {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
337 {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
340 {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
343 {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
346 {PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
349 {PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
352 {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
353 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
355 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
356 QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
358 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
359 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
361 {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
362 QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},
364 {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
367 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
368 QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
370 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
371 QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
373 {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
376 {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
377 QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
380 /* This overrides anything that was found in ohci_quirks[]. */
381 static int param_quirks;
382 module_param_named(quirks, param_quirks, int, 0644);
383 MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
384 ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
385 ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
386 ", AR/selfID endianness = " __stringify(QUIRK_BE_HEADERS)
387 ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A)
388 ", disable MSI = " __stringify(QUIRK_NO_MSI)
389 ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
390 ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
393 #define OHCI_PARAM_DEBUG_AT_AR 1
394 #define OHCI_PARAM_DEBUG_SELFIDS 2
395 #define OHCI_PARAM_DEBUG_IRQS 4
396 #define OHCI_PARAM_DEBUG_BUSRESETS 8 /* only effective before chip init */
398 static int param_debug;
399 module_param_named(debug, param_debug, int, 0644);
400 MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
401 ", AT/AR events = " __stringify(OHCI_PARAM_DEBUG_AT_AR)
402 ", self-IDs = " __stringify(OHCI_PARAM_DEBUG_SELFIDS)
403 ", IRQs = " __stringify(OHCI_PARAM_DEBUG_IRQS)
404 ", busReset events = " __stringify(OHCI_PARAM_DEBUG_BUSRESETS)
405 ", or a combination, or all = -1)");
407 static bool param_remote_dma;
408 module_param_named(remote_dma, param_remote_dma, bool, 0444);
409 MODULE_PARM_DESC(remote_dma, "Enable unfiltered remote DMA (default = N)");
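/*
 * A hedged usage sketch for the three module parameters above; the flag
 * values come from the definitions in this file, the command lines are
 * only illustrative:
 *
 *   modprobe firewire-ohci quirks=0x12  # QUIRK_RESET_PACKET | QUIRK_NO_MSI
 *   modprobe firewire-ohci debug=-1     # all of AT/AR, self-IDs, IRQs, busResets
 *   modprobe firewire-ohci remote_dma=1 # allow unfiltered remote DMA
 */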
411 static void log_irqs(struct fw_ohci *ohci, u32 evt)
413 if (likely(!(param_debug &
414 (OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
417 if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
418 !(evt & OHCI1394_busReset))
421 ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
422 evt & OHCI1394_selfIDComplete ? " selfID" : "",
423 evt & OHCI1394_RQPkt ? " AR_req" : "",
424 evt & OHCI1394_RSPkt ? " AR_resp" : "",
425 evt & OHCI1394_reqTxComplete ? " AT_req" : "",
426 evt & OHCI1394_respTxComplete ? " AT_resp" : "",
427 evt & OHCI1394_isochRx ? " IR" : "",
428 evt & OHCI1394_isochTx ? " IT" : "",
429 evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
430 evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
431 evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
432 evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
433 evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
434 evt & OHCI1394_unrecoverableError ? " unrecoverableError" : "",
435 evt & OHCI1394_busReset ? " busReset" : "",
436 evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
437 OHCI1394_RSPkt | OHCI1394_reqTxComplete |
438 OHCI1394_respTxComplete | OHCI1394_isochRx |
439 OHCI1394_isochTx | OHCI1394_postedWriteErr |
440 OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
441 OHCI1394_cycleInconsistent |
442 OHCI1394_regAccessFail | OHCI1394_busReset)
446 static const char *speed[] = {
447 [0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
449 static const char *power[] = {
450 [0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
451 [4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
453 static const char port[] = { '.', '-', 'p', 'c', };
455 static char _p(u32 *s, int shift)
457 return port[*s >> shift & 3];
460 static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
464 if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
467 ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
468 self_id_count, generation, ohci->node_id);
470 for (s = ohci->self_id_buffer; self_id_count--; ++s)
471 if ((*s & 1 << 23) == 0)
473 "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
474 *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
475 speed[*s >> 14 & 3], *s >> 16 & 63,
476 power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
477 *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
480 "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
482 _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
483 _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
486 static const char *evts[] = {
487 [0x00] = "evt_no_status", [0x01] = "-reserved-",
488 [0x02] = "evt_long_packet", [0x03] = "evt_missing_ack",
489 [0x04] = "evt_underrun", [0x05] = "evt_overrun",
490 [0x06] = "evt_descriptor_read", [0x07] = "evt_data_read",
491 [0x08] = "evt_data_write", [0x09] = "evt_bus_reset",
492 [0x0a] = "evt_timeout", [0x0b] = "evt_tcode_err",
493 [0x0c] = "-reserved-", [0x0d] = "-reserved-",
494 [0x0e] = "evt_unknown", [0x0f] = "evt_flushed",
495 [0x10] = "-reserved-", [0x11] = "ack_complete",
496 [0x12] = "ack_pending ", [0x13] = "-reserved-",
497 [0x14] = "ack_busy_X", [0x15] = "ack_busy_A",
498 [0x16] = "ack_busy_B", [0x17] = "-reserved-",
499 [0x18] = "-reserved-", [0x19] = "-reserved-",
500 [0x1a] = "-reserved-", [0x1b] = "ack_tardy",
501 [0x1c] = "-reserved-", [0x1d] = "ack_data_error",
502 [0x1e] = "ack_type_error", [0x1f] = "-reserved-",
503 [0x20] = "pending/cancelled",
505 static const char *tcodes[] = {
506 [0x0] = "QW req", [0x1] = "BW req",
507 [0x2] = "W resp", [0x3] = "-reserved-",
508 [0x4] = "QR req", [0x5] = "BR req",
509 [0x6] = "QR resp", [0x7] = "BR resp",
510 [0x8] = "cycle start", [0x9] = "Lk req",
511 [0xa] = "async stream packet", [0xb] = "Lk resp",
512 [0xc] = "-reserved-", [0xd] = "-reserved-",
513 [0xe] = "link internal", [0xf] = "-reserved-",
516 static void log_ar_at_event(struct fw_ohci *ohci,
517 char dir, int speed, u32 *header, int evt)
519 int tcode = header[0] >> 4 & 0xf;
522 if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
525 if (unlikely(evt >= ARRAY_SIZE(evts)))
528 if (evt == OHCI1394_evt_bus_reset) {
529 ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
530 dir, (header[2] >> 16) & 0xff);
535 case 0x0: case 0x6: case 0x8:
536 snprintf(specific, sizeof(specific), " = %08x",
537 be32_to_cpu((__force __be32)header[3]));
539 case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
540 snprintf(specific, sizeof(specific), " %x,%x",
541 header[3] >> 16, header[3] & 0xffff);
549 ohci_notice(ohci, "A%c %s, %s\n",
550 dir, evts[evt], tcodes[tcode]);
553 ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
554 dir, evts[evt], header[1], header[2]);
556 case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
558 "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
559 dir, speed, header[0] >> 10 & 0x3f,
560 header[1] >> 16, header[0] >> 16, evts[evt],
561 tcodes[tcode], header[1] & 0xffff, header[2], specific);
565 "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
566 dir, speed, header[0] >> 10 & 0x3f,
567 header[1] >> 16, header[0] >> 16, evts[evt],
568 tcodes[tcode], specific);
572 static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
574 writel(data, ohci->registers + offset);
577 static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
579 return readl(ohci->registers + offset);
582 static inline void flush_writes(const struct fw_ohci *ohci)
584 /* Do a dummy read to flush writes. */
585 reg_read(ohci, OHCI1394_Version);
589 * Beware! read_phy_reg(), write_phy_reg(), update_phy_reg(), and
590 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
591 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
592 * directly. Exceptions are intrinsically serialized contexts like pci_probe.
594 static int read_phy_reg(struct fw_ohci *ohci, int addr)
599 reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
600 for (i = 0; i < 3 + 100; i++) {
601 val = reg_read(ohci, OHCI1394_PhyControl);
603 return -ENODEV; /* Card was ejected. */
605 if (val & OHCI1394_PhyControl_ReadDone)
606 return OHCI1394_PhyControl_ReadData(val);
609 * Try a few times without waiting. Sleeping is necessary
610 * only when the link/PHY interface is busy.
615 ohci_err(ohci, "failed to read phy reg %d\n", addr);
621 static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
625 reg_write(ohci, OHCI1394_PhyControl,
626 OHCI1394_PhyControl_Write(addr, val));
627 for (i = 0; i < 3 + 100; i++) {
628 val = reg_read(ohci, OHCI1394_PhyControl);
630 return -ENODEV; /* Card was ejected. */
632 if (!(val & OHCI1394_PhyControl_WritePending))
638 ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
644 static int update_phy_reg(struct fw_ohci *ohci, int addr,
645 int clear_bits, int set_bits)
647 int ret = read_phy_reg(ohci, addr);
652 * The interrupt status bits are cleared by writing a one bit.
653 * Avoid clearing them unless explicitly requested in set_bits.
656 clear_bits |= PHY_INT_STATUS_BITS;
658 return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
661 static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
665 ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
669 return read_phy_reg(ohci, addr);
672 static int ohci_read_phy_reg(struct fw_card *card, int addr)
674 struct fw_ohci *ohci = fw_ohci(card);
677 mutex_lock(&ohci->phy_reg_mutex);
678 ret = read_phy_reg(ohci, addr);
679 mutex_unlock(&ohci->phy_reg_mutex);
684 static int ohci_update_phy_reg(struct fw_card *card, int addr,
685 int clear_bits, int set_bits)
687 struct fw_ohci *ohci = fw_ohci(card);
690 mutex_lock(&ohci->phy_reg_mutex);
691 ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
692 mutex_unlock(&ohci->phy_reg_mutex);
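/*
 * The DMA address of each AR buffer page is stashed in the page's private
 * field by ar_context_init() (via set_page_private()), so it can be
 * retrieved here without keeping a separate address array.
 */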
697 static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
699 return page_private(ctx->pages[i]);
702 static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
704 struct descriptor *d;
706 d = &ctx->descriptors[index];
707 d->branch_address &= cpu_to_le32(~0xf);
708 d->res_count = cpu_to_le16(PAGE_SIZE);
709 d->transfer_status = 0;
711 wmb(); /* finish init of new descriptors before branch_address update */
712 d = &ctx->descriptors[ctx->last_buffer_index];
713 d->branch_address |= cpu_to_le32(1);
715 ctx->last_buffer_index = index;
717 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
720 static void ar_context_release(struct ar_context *ctx)
722 struct device *dev = ctx->ohci->card.device;
730 for (i = 0; i < AR_BUFFERS; i++) {
732 dma_free_pages(dev, PAGE_SIZE, ctx->pages[i],
733 ar_buffer_bus(ctx, i), DMA_FROM_DEVICE);
737 static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
739 struct fw_ohci *ohci = ctx->ohci;
741 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
742 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
745 ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
747 /* FIXME: restart? */
750 static inline unsigned int ar_next_buffer_index(unsigned int index)
752 return (index + 1) % AR_BUFFERS;
755 static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
757 return ar_next_buffer_index(ctx->last_buffer_index);
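/*
 * Illustrative only: the AR descriptors form a ring of AR_BUFFERS entries,
 * so with AR_BUFFERS = 8 and last_buffer_index = 5, ar_first_buffer_index()
 * returns 6 and buffers are visited in the order 6, 7, 0, 1, ... until the
 * search in ar_search_last_active_buffer() below reaches the last,
 * partially filled buffer.
 */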
761 * We search for the buffer that contains the last AR packet DMA data written
762 * by the controller.
764 static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
765 unsigned int *buffer_offset)
767 unsigned int i, next_i, last = ctx->last_buffer_index;
768 __le16 res_count, next_res_count;
770 i = ar_first_buffer_index(ctx);
771 res_count = READ_ONCE(ctx->descriptors[i].res_count);
773 /* A buffer that is not yet completely filled must be the last one. */
774 while (i != last && res_count == 0) {
776 /* Peek at the next descriptor. */
777 next_i = ar_next_buffer_index(i);
778 rmb(); /* read descriptors in order */
779 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
781 * If the next descriptor is still empty, we must stop at this
782 * descriptor.
784 if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
786 * The exception is when the DMA data for one packet is
787 * split over three buffers; in this case, the middle
788 * buffer's descriptor might never be updated by the
789 * controller and still look empty, and we have to peek
790 * at the third one instead.
792 if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
793 next_i = ar_next_buffer_index(next_i);
795 next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count);
796 if (next_res_count != cpu_to_le16(PAGE_SIZE))
797 goto next_buffer_is_active;
803 next_buffer_is_active:
805 res_count = next_res_count;
808 rmb(); /* read res_count before the DMA data */
810 *buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
811 if (*buffer_offset > PAGE_SIZE) {
813 ar_context_abort(ctx, "corrupted descriptor");
819 static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
820 unsigned int end_buffer_index,
821 unsigned int end_buffer_offset)
825 i = ar_first_buffer_index(ctx);
826 while (i != end_buffer_index) {
827 dma_sync_single_for_cpu(ctx->ohci->card.device,
828 ar_buffer_bus(ctx, i),
829 PAGE_SIZE, DMA_FROM_DEVICE);
830 i = ar_next_buffer_index(i);
832 if (end_buffer_offset > 0)
833 dma_sync_single_for_cpu(ctx->ohci->card.device,
834 ar_buffer_bus(ctx, i),
835 end_buffer_offset, DMA_FROM_DEVICE);
838 #if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
839 #define cond_le32_to_cpu(v) \
840 (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
842 #define cond_le32_to_cpu(v) le32_to_cpu(v)
845 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
847 struct fw_ohci *ohci = ctx->ohci;
849 u32 status, length, tcode;
852 p.header[0] = cond_le32_to_cpu(buffer[0]);
853 p.header[1] = cond_le32_to_cpu(buffer[1]);
854 p.header[2] = cond_le32_to_cpu(buffer[2]);
856 tcode = (p.header[0] >> 4) & 0x0f;
858 case TCODE_WRITE_QUADLET_REQUEST:
859 case TCODE_READ_QUADLET_RESPONSE:
860 p.header[3] = (__force __u32) buffer[3];
861 p.header_length = 16;
862 p.payload_length = 0;
865 case TCODE_READ_BLOCK_REQUEST :
866 p.header[3] = cond_le32_to_cpu(buffer[3]);
867 p.header_length = 16;
868 p.payload_length = 0;
871 case TCODE_WRITE_BLOCK_REQUEST:
872 case TCODE_READ_BLOCK_RESPONSE:
873 case TCODE_LOCK_REQUEST:
874 case TCODE_LOCK_RESPONSE:
875 p.header[3] = cond_le32_to_cpu(buffer[3]);
876 p.header_length = 16;
877 p.payload_length = p.header[3] >> 16;
878 if (p.payload_length > MAX_ASYNC_PAYLOAD) {
879 ar_context_abort(ctx, "invalid packet length");
884 case TCODE_WRITE_RESPONSE:
885 case TCODE_READ_QUADLET_REQUEST:
886 case OHCI_TCODE_PHY_PACKET:
887 p.header_length = 12;
888 p.payload_length = 0;
892 ar_context_abort(ctx, "invalid tcode");
896 p.payload = (void *) buffer + p.header_length;
898 /* FIXME: What to do about evt_* errors? */
899 length = (p.header_length + p.payload_length + 3) / 4;
900 status = cond_le32_to_cpu(buffer[length]);
901 evt = (status >> 16) & 0x1f;
904 p.speed = (status >> 21) & 0x7;
905 p.timestamp = status & 0xffff;
906 p.generation = ohci->request_generation;
908 log_ar_at_event(ohci, 'R', p.speed, p.header, evt);
911 * Several controllers, notably from NEC and VIA, forget to
912 * write ack_complete status at PHY packet reception.
914 if (evt == OHCI1394_evt_no_status &&
915 (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
916 p.ack = ACK_COMPLETE;
919 * The OHCI bus reset handler synthesizes a PHY packet with
920 * the new generation number when a bus reset happens (see
921 * section 8.4.2.3). This helps us determine when a request
922 * was received and make sure we send the response in the same
923 * generation. We only need this for requests; for responses
924 * we use the unique tlabel for finding the matching
925 * request.
927 * Alas some chips sometimes emit bus reset packets with a
928 * wrong generation. We set the correct generation for these
929 * at a slightly incorrect time (in bus_reset_work).
931 if (evt == OHCI1394_evt_bus_reset) {
932 if (!(ohci->quirks & QUIRK_RESET_PACKET))
933 ohci->request_generation = (p.header[2] >> 16) & 0xff;
934 } else if (ctx == &ohci->ar_request_ctx) {
935 fw_core_handle_request(&ohci->card, &p);
937 fw_core_handle_response(&ohci->card, &p);
940 return buffer + length + 1;
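/*
 * A worked example of the length arithmetic above: for a quadlet read
 * response (header_length 16, payload_length 0), length = (16 + 0 + 3) / 4
 * = 4 quadlets, the status/timestamp trailer is read from buffer[4], and
 * the returned buffer + 5 is where the next packet starts.
 */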
943 static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
948 next = handle_ar_packet(ctx, p);
957 static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
961 i = ar_first_buffer_index(ctx);
962 while (i != end_buffer) {
963 dma_sync_single_for_device(ctx->ohci->card.device,
964 ar_buffer_bus(ctx, i),
965 PAGE_SIZE, DMA_FROM_DEVICE);
966 ar_context_link_page(ctx, i);
967 i = ar_next_buffer_index(i);
971 static void ar_context_tasklet(unsigned long data)
973 struct ar_context *ctx = (struct ar_context *)data;
974 unsigned int end_buffer_index, end_buffer_offset;
981 end_buffer_index = ar_search_last_active_buffer(ctx,
983 ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
984 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;
986 if (end_buffer_index < ar_first_buffer_index(ctx)) {
988 * The filled part of the overall buffer wraps around; handle
989 * all packets up to the buffer end here. If the last packet
990 * wraps around, its tail will be visible after the buffer end
991 * because the buffer start pages are mapped there again.
993 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
994 p = handle_ar_packets(ctx, p, buffer_end);
997 /* adjust p to point back into the actual buffer */
998 p -= AR_BUFFERS * PAGE_SIZE;
1001 p = handle_ar_packets(ctx, p, end);
1004 ar_context_abort(ctx, "inconsistent descriptor");
1009 ar_recycle_buffers(ctx, end_buffer_index);
1014 ctx->pointer = NULL;
1017 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
1018 unsigned int descriptors_offset, u32 regs)
1020 struct device *dev = ohci->card.device;
1022 dma_addr_t dma_addr;
1023 struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
1024 struct descriptor *d;
1028 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);
1030 for (i = 0; i < AR_BUFFERS; i++) {
1031 ctx->pages[i] = dma_alloc_pages(dev, PAGE_SIZE, &dma_addr,
1032 DMA_FROM_DEVICE, GFP_KERNEL);
1035 set_page_private(ctx->pages[i], dma_addr);
1036 dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE,
1040 for (i = 0; i < AR_BUFFERS; i++)
1041 pages[i] = ctx->pages[i];
1042 for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
1043 pages[AR_BUFFERS + i] = ctx->pages[i];
1044 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
1048 ctx->descriptors = ohci->misc_buffer + descriptors_offset;
1049 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;
1051 for (i = 0; i < AR_BUFFERS; i++) {
1052 d = &ctx->descriptors[i];
1053 d->req_count = cpu_to_le16(PAGE_SIZE);
1054 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
1056 DESCRIPTOR_BRANCH_ALWAYS);
1057 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
1058 d->branch_address = cpu_to_le32(ctx->descriptors_bus +
1059 ar_next_buffer_index(i) * sizeof(struct descriptor));
1065 ar_context_release(ctx);
1070 static void ar_context_run(struct ar_context *ctx)
1074 for (i = 0; i < AR_BUFFERS; i++)
1075 ar_context_link_page(ctx, i);
1077 ctx->pointer = ctx->buffer;
1079 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
1080 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
1083 static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
1087 branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);
1089 /* figure out which descriptor the branch address goes in */
1090 if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
1096 static void context_tasklet(unsigned long data)
1098 struct context *ctx = (struct context *) data;
1099 struct descriptor *d, *last;
1102 struct descriptor_buffer *desc;
1104 desc = list_entry(ctx->buffer_list.next,
1105 struct descriptor_buffer, list);
1107 while (last->branch_address != 0) {
1108 struct descriptor_buffer *old_desc = desc;
1109 address = le32_to_cpu(last->branch_address);
1112 ctx->current_bus = address;
1114 /* If the branch address points to a buffer outside of the
1115 * current buffer, advance to the next buffer. */
1116 if (address < desc->buffer_bus ||
1117 address >= desc->buffer_bus + desc->used)
1118 desc = list_entry(desc->list.next,
1119 struct descriptor_buffer, list);
1120 d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
1121 last = find_branch_descriptor(d, z);
1123 if (!ctx->callback(ctx, d, last))
1126 if (old_desc != desc) {
1127 /* If we've advanced to the next buffer, move the
1128 * previous buffer to the free list. */
1129 unsigned long flags;
1131 spin_lock_irqsave(&ctx->ohci->lock, flags);
1132 list_move_tail(&old_desc->list, &ctx->buffer_list);
1133 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1140 * Allocate a new buffer and add it to the list of free buffers for this
1141 * context. Must be called with ohci->lock held.
1143 static int context_add_buffer(struct context *ctx)
1145 struct descriptor_buffer *desc;
1146 dma_addr_t bus_addr;
1150 * 16MB of descriptors should be far more than enough for any DMA
1151 * program. This will catch run-away userspace or DoS attacks.
1153 if (ctx->total_allocation >= 16*1024*1024)
1156 desc = dmam_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, &bus_addr, GFP_ATOMIC);
1160 offset = (void *)&desc->buffer - (void *)desc;
1162 * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
1163 * for descriptors, even 0x10-byte ones. This can cause page faults when
1164 * an IOMMU is in use and the oversized read crosses a page boundary.
1165 * Work around this by always leaving at least 0x10 bytes of padding.
1167 desc->buffer_size = PAGE_SIZE - offset - 0x10;
1168 desc->buffer_bus = bus_addr + offset;
1171 list_add_tail(&desc->list, &ctx->buffer_list);
1172 ctx->total_allocation += PAGE_SIZE;
1177 static int context_init(struct context *ctx, struct fw_ohci *ohci,
1178 u32 regs, descriptor_callback_t callback)
1182 ctx->total_allocation = 0;
1184 INIT_LIST_HEAD(&ctx->buffer_list);
1185 if (context_add_buffer(ctx) < 0)
1188 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
1189 struct descriptor_buffer, list);
1191 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
1192 ctx->callback = callback;
1195 * We put a dummy descriptor in the buffer that has a NULL
1196 * branch address and looks like it's been sent. That way we
1197 * have a descriptor to append DMA programs to.
1199 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1200 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1201 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1202 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1203 ctx->last = ctx->buffer_tail->buffer;
1204 ctx->prev = ctx->buffer_tail->buffer;
1210 static void context_release(struct context *ctx)
1212 struct fw_card *card = &ctx->ohci->card;
1213 struct descriptor_buffer *desc, *tmp;
1215 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) {
1216 dmam_free_coherent(card->device, PAGE_SIZE, desc,
1217 desc->buffer_bus - ((void *)&desc->buffer - (void *)desc));
1221 /* Must be called with ohci->lock held */
1222 static struct descriptor *context_get_descriptors(struct context *ctx,
1223 int z, dma_addr_t *d_bus)
1225 struct descriptor *d = NULL;
1226 struct descriptor_buffer *desc = ctx->buffer_tail;
1228 if (z * sizeof(*d) > desc->buffer_size)
1231 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
1232 /* No room for the descriptor in this buffer, so advance to the
1233 * next one. */
1235 if (desc->list.next == &ctx->buffer_list) {
1236 /* If there is no free buffer next in the list,
1237 * allocate one. */
1238 if (context_add_buffer(ctx) < 0)
1241 desc = list_entry(desc->list.next,
1242 struct descriptor_buffer, list);
1243 ctx->buffer_tail = desc;
1246 d = desc->buffer + desc->used / sizeof(*d);
1247 memset(d, 0, z * sizeof(*d));
1248 *d_bus = desc->buffer_bus + desc->used;
1253 static void context_run(struct context *ctx, u32 extra)
1255 struct fw_ohci *ohci = ctx->ohci;
1257 reg_write(ohci, COMMAND_PTR(ctx->regs),
1258 le32_to_cpu(ctx->last->branch_address));
1259 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
1260 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
1261 ctx->running = true;
1265 static void context_append(struct context *ctx,
1266 struct descriptor *d, int z, int extra)
1269 struct descriptor_buffer *desc = ctx->buffer_tail;
1270 struct descriptor *d_branch;
1272 d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);
1274 desc->used += (z + extra) * sizeof(*d);
1276 wmb(); /* finish init of new descriptors before branch_address update */
1278 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
1279 d_branch->branch_address = cpu_to_le32(d_bus | z);
1282 * VT6306 incorrectly checks only the single descriptor at the
1283 * CommandPtr when the wake bit is written, so if it's a
1284 * multi-descriptor block starting with an INPUT_MORE, put a copy of
1285 * the branch address in the first descriptor.
1287 * This is not done for transmit contexts, since it is not clear how it
1288 * interacts with skip addresses.
1290 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
1291 d_branch != ctx->prev &&
1292 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
1293 cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
1294 ctx->prev->branch_address = cpu_to_le32(d_bus | z);
1301 static void context_stop(struct context *ctx)
1303 struct fw_ohci *ohci = ctx->ohci;
1307 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
1308 ctx->running = false;
1310 for (i = 0; i < 1000; i++) {
1311 reg = reg_read(ohci, CONTROL_SET(ctx->regs));
1312 if ((reg & CONTEXT_ACTIVE) == 0)
1318 ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
1321 struct driver_data {
1323 struct fw_packet *packet;
1327 * This function appends a packet to the DMA queue for transmission.
1328 * Must always be called with the ohci->lock held to ensure proper
1329 * generation handling and locking around packet queue manipulation.
1331 static int at_context_queue_packet(struct context *ctx,
1332 struct fw_packet *packet)
1334 struct fw_ohci *ohci = ctx->ohci;
1335 dma_addr_t d_bus, payload_bus;
1336 struct driver_data *driver_data;
1337 struct descriptor *d, *last;
1341 d = context_get_descriptors(ctx, 4, &d_bus);
1343 packet->ack = RCODE_SEND_ERROR;
1347 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
1348 d[0].res_count = cpu_to_le16(packet->timestamp);
1351 * The DMA format for asynchronous link packets is different
1352 * from the IEEE1394 layout, so shift the fields around
1353 * accordingly.
1356 tcode = (packet->header[0] >> 4) & 0x0f;
1357 header = (__le32 *) &d[1];
1359 case TCODE_WRITE_QUADLET_REQUEST:
1360 case TCODE_WRITE_BLOCK_REQUEST:
1361 case TCODE_WRITE_RESPONSE:
1362 case TCODE_READ_QUADLET_REQUEST:
1363 case TCODE_READ_BLOCK_REQUEST:
1364 case TCODE_READ_QUADLET_RESPONSE:
1365 case TCODE_READ_BLOCK_RESPONSE:
1366 case TCODE_LOCK_REQUEST:
1367 case TCODE_LOCK_RESPONSE:
1368 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1369 (packet->speed << 16));
1370 header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
1371 (packet->header[0] & 0xffff0000));
1372 header[2] = cpu_to_le32(packet->header[2]);
1374 if (TCODE_IS_BLOCK_PACKET(tcode))
1375 header[3] = cpu_to_le32(packet->header[3]);
1377 header[3] = (__force __le32) packet->header[3];
1379 d[0].req_count = cpu_to_le16(packet->header_length);
1382 case TCODE_LINK_INTERNAL:
1383 header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
1384 (packet->speed << 16));
1385 header[1] = cpu_to_le32(packet->header[1]);
1386 header[2] = cpu_to_le32(packet->header[2]);
1387 d[0].req_count = cpu_to_le16(12);
1389 if (is_ping_packet(&packet->header[1]))
1390 d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
1393 case TCODE_STREAM_DATA:
1394 header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
1395 (packet->speed << 16));
1396 header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
1397 d[0].req_count = cpu_to_le16(8);
1402 packet->ack = RCODE_SEND_ERROR;
1406 BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
1407 driver_data = (struct driver_data *) &d[3];
1408 driver_data->packet = packet;
1409 packet->driver_data = driver_data;
1411 if (packet->payload_length > 0) {
1412 if (packet->payload_length > sizeof(driver_data->inline_data)) {
1413 payload_bus = dma_map_single(ohci->card.device,
1415 packet->payload_length,
1417 if (dma_mapping_error(ohci->card.device, payload_bus)) {
1418 packet->ack = RCODE_SEND_ERROR;
1421 packet->payload_bus = payload_bus;
1422 packet->payload_mapped = true;
1424 memcpy(driver_data->inline_data, packet->payload,
1425 packet->payload_length);
1426 payload_bus = d_bus + 3 * sizeof(*d);
1429 d[2].req_count = cpu_to_le16(packet->payload_length);
1430 d[2].data_address = cpu_to_le32(payload_bus);
1438 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
1439 DESCRIPTOR_IRQ_ALWAYS |
1440 DESCRIPTOR_BRANCH_ALWAYS);
1442 /* FIXME: Document how the locking works. */
1443 if (ohci->generation != packet->generation) {
1444 if (packet->payload_mapped)
1445 dma_unmap_single(ohci->card.device, payload_bus,
1446 packet->payload_length, DMA_TO_DEVICE);
1447 packet->ack = RCODE_GENERATION;
1451 context_append(ctx, d, z, 4 - z);
1454 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
1456 context_run(ctx, 0);
1461 static void at_context_flush(struct context *ctx)
1463 tasklet_disable(&ctx->tasklet);
1465 ctx->flushing = true;
1466 context_tasklet((unsigned long)ctx);
1467 ctx->flushing = false;
1469 tasklet_enable(&ctx->tasklet);
1472 static int handle_at_packet(struct context *context,
1473 struct descriptor *d,
1474 struct descriptor *last)
1476 struct driver_data *driver_data;
1477 struct fw_packet *packet;
1478 struct fw_ohci *ohci = context->ohci;
1481 if (last->transfer_status == 0 && !context->flushing)
1482 /* This descriptor isn't done yet, stop iteration. */
1485 driver_data = (struct driver_data *) &d[3];
1486 packet = driver_data->packet;
1488 /* This packet was cancelled, just continue. */
1491 if (packet->payload_mapped)
1492 dma_unmap_single(ohci->card.device, packet->payload_bus,
1493 packet->payload_length, DMA_TO_DEVICE);
1495 evt = le16_to_cpu(last->transfer_status) & 0x1f;
1496 packet->timestamp = le16_to_cpu(last->res_count);
1498 log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);
1501 case OHCI1394_evt_timeout:
1502 /* Async response transmit timed out. */
1503 packet->ack = RCODE_CANCELLED;
1506 case OHCI1394_evt_flushed:
1508 * A flushed packet should give the same error as
1509 * when we try to use a stale generation count.
1511 packet->ack = RCODE_GENERATION;
1514 case OHCI1394_evt_missing_ack:
1515 if (context->flushing)
1516 packet->ack = RCODE_GENERATION;
1519 * Using a valid (current) generation count, but the
1520 * node is not on the bus or not sending acks.
1522 packet->ack = RCODE_NO_ACK;
1526 case ACK_COMPLETE + 0x10:
1527 case ACK_PENDING + 0x10:
1528 case ACK_BUSY_X + 0x10:
1529 case ACK_BUSY_A + 0x10:
1530 case ACK_BUSY_B + 0x10:
1531 case ACK_DATA_ERROR + 0x10:
1532 case ACK_TYPE_ERROR + 0x10:
1533 packet->ack = evt - 0x10;
1536 case OHCI1394_evt_no_status:
1537 if (context->flushing) {
1538 packet->ack = RCODE_GENERATION;
1544 packet->ack = RCODE_SEND_ERROR;
1548 packet->callback(packet, &ohci->card, packet->ack);
1553 #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
1554 #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
1555 #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
1556 #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
1557 #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
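/*
 * For example (values purely illustrative): for a block write request whose
 * first header quadlet is q0 = 0xffc0f110 and whose fourth quadlet is
 * q3 = 0x00080000, HEADER_GET_DESTINATION(q0) is 0xffc0,
 * HEADER_GET_TCODE(q0) is 0x1 (TCODE_WRITE_BLOCK_REQUEST), and
 * HEADER_GET_DATA_LENGTH(q3) is 8, i.e. an 8-byte payload.
 */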
1559 static void handle_local_rom(struct fw_ohci *ohci,
1560 struct fw_packet *packet, u32 csr)
1562 struct fw_packet response;
1563 int tcode, length, i;
1565 tcode = HEADER_GET_TCODE(packet->header[0]);
1566 if (TCODE_IS_BLOCK_PACKET(tcode))
1567 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1571 i = csr - CSR_CONFIG_ROM;
1572 if (i + length > CONFIG_ROM_SIZE) {
1573 fw_fill_response(&response, packet->header,
1574 RCODE_ADDRESS_ERROR, NULL, 0);
1575 } else if (!TCODE_IS_READ_REQUEST(tcode)) {
1576 fw_fill_response(&response, packet->header,
1577 RCODE_TYPE_ERROR, NULL, 0);
1579 fw_fill_response(&response, packet->header, RCODE_COMPLETE,
1580 (void *) ohci->config_rom + i, length);
1583 fw_core_handle_response(&ohci->card, &response);
1586 static void handle_local_lock(struct fw_ohci *ohci,
1587 struct fw_packet *packet, u32 csr)
1589 struct fw_packet response;
1590 int tcode, length, ext_tcode, sel, try;
1591 __be32 *payload, lock_old;
1592 u32 lock_arg, lock_data;
1594 tcode = HEADER_GET_TCODE(packet->header[0]);
1595 length = HEADER_GET_DATA_LENGTH(packet->header[3]);
1596 payload = packet->payload;
1597 ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);
1599 if (tcode == TCODE_LOCK_REQUEST &&
1600 ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
1601 lock_arg = be32_to_cpu(payload[0]);
1602 lock_data = be32_to_cpu(payload[1]);
1603 } else if (tcode == TCODE_READ_QUADLET_REQUEST) {
1607 fw_fill_response(&response, packet->header,
1608 RCODE_TYPE_ERROR, NULL, 0);
1612 sel = (csr - CSR_BUS_MANAGER_ID) / 4;
1613 reg_write(ohci, OHCI1394_CSRData, lock_data);
1614 reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
1615 reg_write(ohci, OHCI1394_CSRControl, sel);
1617 for (try = 0; try < 20; try++)
1618 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
1619 lock_old = cpu_to_be32(reg_read(ohci,
1621 fw_fill_response(&response, packet->header,
1623 &lock_old, sizeof(lock_old));
1627 ohci_err(ohci, "swap not done (CSR lock timeout)\n");
1628 fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
1631 fw_core_handle_response(&ohci->card, &response);
1634 static void handle_local_request(struct context *ctx, struct fw_packet *packet)
1638 if (ctx == &ctx->ohci->at_request_ctx) {
1639 packet->ack = ACK_PENDING;
1640 packet->callback(packet, &ctx->ohci->card, packet->ack);
1644 ((unsigned long long)
1645 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
1647 csr = offset - CSR_REGISTER_BASE;
1649 /* Handle config rom reads. */
1650 if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
1651 handle_local_rom(ctx->ohci, packet, csr);
1653 case CSR_BUS_MANAGER_ID:
1654 case CSR_BANDWIDTH_AVAILABLE:
1655 case CSR_CHANNELS_AVAILABLE_HI:
1656 case CSR_CHANNELS_AVAILABLE_LO:
1657 handle_local_lock(ctx->ohci, packet, csr);
1660 if (ctx == &ctx->ohci->at_request_ctx)
1661 fw_core_handle_request(&ctx->ohci->card, packet);
1663 fw_core_handle_response(&ctx->ohci->card, packet);
1667 if (ctx == &ctx->ohci->at_response_ctx) {
1668 packet->ack = ACK_COMPLETE;
1669 packet->callback(packet, &ctx->ohci->card, packet->ack);
1673 static u32 get_cycle_time(struct fw_ohci *ohci);
1675 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
1677 unsigned long flags;
1680 spin_lock_irqsave(&ctx->ohci->lock, flags);
1682 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
1683 ctx->ohci->generation == packet->generation) {
1684 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1686 // Timestamping on behalf of the hardware.
1687 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1689 handle_local_request(ctx, packet);
1693 ret = at_context_queue_packet(ctx, packet);
1694 spin_unlock_irqrestore(&ctx->ohci->lock, flags);
1697 // Timestamping on behalf of the hardware.
1698 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ctx->ohci));
1700 packet->callback(packet, &ctx->ohci->card, packet->ack);
1704 static void detect_dead_context(struct fw_ohci *ohci,
1705 const char *name, unsigned int regs)
1709 ctl = reg_read(ohci, CONTROL_SET(regs));
1710 if (ctl & CONTEXT_DEAD)
1711 ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
1712 name, evts[ctl & 0x1f]);
1715 static void handle_dead_contexts(struct fw_ohci *ohci)
1720 detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
1721 detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
1722 detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
1723 detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
1724 for (i = 0; i < 32; ++i) {
1725 if (!(ohci->it_context_support & (1 << i)))
1727 sprintf(name, "IT%u", i);
1728 detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
1730 for (i = 0; i < 32; ++i) {
1731 if (!(ohci->ir_context_support & (1 << i)))
1733 sprintf(name, "IR%u", i);
1734 detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
1736 /* TODO: maybe try to flush and restart the dead contexts */
1739 static u32 cycle_timer_ticks(u32 cycle_timer)
1743 ticks = cycle_timer & 0xfff;
1744 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1745 ticks += (3072 * 8000) * (cycle_timer >> 25);
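/*
 * The register layout behind this conversion: seconds in bits 31-25,
 * cycleCount (0..7999) in bits 24-12, cycleOffset (0..3071) in bits 11-0.
 * As an illustrative check, cycle_timer = 0x04001800 decodes to 2 s,
 * 1 cycle, offset 2048, i.e. 2048 + 3072 * 1 + 3072 * 8000 * 2 = 49157120
 * ticks of the 24.576 MHz clock.
 */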
1751 * Some controllers exhibit one or more of the following bugs when updating the
1752 * iso cycle timer register:
1753 * - When the lowest six bits are wrapping around to zero, a read that happens
1754 * at the same time will return garbage in the lowest ten bits.
1755 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1756 * not incremented for about 60 ns.
1757 * - Occasionally, the entire register reads zero.
1759 * To catch these, we read the register three times and ensure that the
1760 * difference between each two consecutive reads is approximately the same, i.e.
1761 * less than twice the other. Furthermore, any negative difference indicates an
1762 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1763 * execute, so we have enough precision to compute the ratio of the differences.)
1765 static u32 get_cycle_time(struct fw_ohci *ohci)
1772 if (has_reboot_by_cycle_timer_read_quirk(ohci))
1775 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1777 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1780 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1784 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1785 t0 = cycle_timer_ticks(c0);
1786 t1 = cycle_timer_ticks(c1);
1787 t2 = cycle_timer_ticks(c2);
1790 } while ((diff01 <= 0 || diff12 <= 0 ||
1791 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1799 * This function has to be called at least every 64 seconds. The bus_time
1800 * field stores not only the upper 25 bits of the BUS_TIME register but also
1801 * the most significant bit of the cycle timer in bit 6 so that we can detect
1802 * changes in this bit.
1804 static u32 update_bus_time(struct fw_ohci *ohci)
1806 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1808 if (unlikely(!ohci->bus_time_running)) {
1809 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
1810 ohci->bus_time = (lower_32_bits(ktime_get_seconds()) & ~0x7f) |
1811 (cycle_time_seconds & 0x40);
1812 ohci->bus_time_running = true;
1815 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1816 ohci->bus_time += 0x40;
1818 return ohci->bus_time | cycle_time_seconds;
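/*
 * A sketch of the bit-6 trick with illustrative numbers: bus_time has bit 6
 * set while the 7-bit seconds field of the cycle timer is in the range
 * 64..127. When that field wraps past 127 back to 0, bit 6 of
 * cycle_time_seconds reads as clear, the comparison above fires, and adding
 * 0x40 carries into bit 7 and beyond, advancing the stored upper 25 bits by
 * one 64-second epoch.
 */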
1821 static int get_status_for_port(struct fw_ohci *ohci, int port_index)
1825 mutex_lock(&ohci->phy_reg_mutex);
1826 reg = write_phy_reg(ohci, 7, port_index);
1828 reg = read_phy_reg(ohci, 8);
1829 mutex_unlock(&ohci->phy_reg_mutex);
1833 switch (reg & 0x0f) {
1835 return 2; /* is child node (connected to parent node) */
1837 return 3; /* is parent node (connected to child node) */
1839 return 1; /* not connected */
1842 static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
1848 for (i = 0; i < self_id_count; i++) {
1849 entry = ohci->self_id_buffer[i];
1850 if ((self_id & 0xff000000) == (entry & 0xff000000))
1852 if ((self_id & 0xff000000) < (entry & 0xff000000))
1858 static int initiated_reset(struct fw_ohci *ohci)
1863 mutex_lock(&ohci->phy_reg_mutex);
1864 reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
1866 reg = read_phy_reg(ohci, 8);
1868 reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
1870 reg = read_phy_reg(ohci, 12); /* read register 12 */
1872 if ((reg & 0x08) == 0x08) {
1873 /* bit 3 indicates "initiated reset" */
1879 mutex_unlock(&ohci->phy_reg_mutex);
1884 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
1885 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
1886 * Construct the selfID from phy register contents.
1888 static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
1890 int reg, i, pos, status;
1891 /* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
1892 u32 self_id = 0x8040c800;
1894 reg = reg_read(ohci, OHCI1394_NodeID);
1895 if (!(reg & OHCI1394_NodeID_idValid)) {
1897 "node ID not valid, new bus reset in progress\n");
1900 self_id |= ((reg & 0x3f) << 24); /* phy ID */
1902 reg = ohci_read_phy_reg(&ohci->card, 4);
1905 self_id |= ((reg & 0x07) << 8); /* power class */
1907 reg = ohci_read_phy_reg(&ohci->card, 1);
1910 self_id |= ((reg & 0x3f) << 16); /* gap count */
1912 for (i = 0; i < 3; i++) {
1913 status = get_status_for_port(ohci, i);
1916 self_id |= ((status & 0x3) << (6 - (i * 2)));
1919 self_id |= initiated_reset(ohci);
1921 pos = get_self_id_pos(ohci, self_id, self_id_count);
1923 memmove(&(ohci->self_id_buffer[pos+1]),
1924 &(ohci->self_id_buffer[pos]),
1925 (self_id_count - pos) * sizeof(*ohci->self_id_buffer));
1926 ohci->self_id_buffer[pos] = self_id;
1929 return self_id_count;
1932 static void bus_reset_work(struct work_struct *work)
1934 struct fw_ohci *ohci =
1935 container_of(work, struct fw_ohci, bus_reset_work);
1936 int self_id_count, generation, new_generation, i, j;
1938 void *free_rom = NULL;
1939 dma_addr_t free_rom_bus = 0;
1942 reg = reg_read(ohci, OHCI1394_NodeID);
1943 if (!(reg & OHCI1394_NodeID_idValid)) {
1945 "node ID not valid, new bus reset in progress\n");
1948 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1949 ohci_notice(ohci, "misconfigured bus\n");
1952 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1953 OHCI1394_NodeID_nodeNumber);
1955 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1956 if (!(ohci->is_root && is_new_root))
1957 reg_write(ohci, OHCI1394_LinkControlSet,
1958 OHCI1394_LinkControl_cycleMaster);
1959 ohci->is_root = is_new_root;
1961 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1962 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1963 ohci_notice(ohci, "self ID receive error\n");
1967 * The count in the SelfIDCount register is the number of
1968 * bytes in the self ID receive buffer. Since we also receive
1969 * the inverted quadlets and a header quadlet, we shift one
1970 * bit extra to get the actual number of self IDs.
1972 self_id_count = (reg >> 3) & 0xff;
1974 if (self_id_count > 252) {
1975 ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
1979 generation = (cond_le32_to_cpu(ohci->self_id[0]) >> 16) & 0xff;
1982 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
1983 u32 id = cond_le32_to_cpu(ohci->self_id[i]);
1984 u32 id2 = cond_le32_to_cpu(ohci->self_id[i + 1]);
1988 * If the invalid data looks like a cycle start packet,
1989 * it's likely to be the result of the cycle master
1990 * having a wrong gap count. In this case, the self IDs
1991 * so far are valid and should be processed so that the
1992 * bus manager can then correct the gap count.
1994 if (id == 0xffff008f) {
1995 ohci_notice(ohci, "ignoring spurious self IDs\n");
2000 ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
2001 j, self_id_count, id, id2);
2004 ohci->self_id_buffer[j] = id;
2007 if (ohci->quirks & QUIRK_TI_SLLZ059) {
2008 self_id_count = find_and_insert_self_id(ohci, self_id_count);
2009 if (self_id_count < 0) {
2011 "could not construct local self ID\n");
2016 if (self_id_count == 0) {
2017 ohci_notice(ohci, "no self IDs\n");
2023 * Check the consistency of the self IDs we just read. The
2024 * problem we face is that a new bus reset can start while we
2025 * read out the self IDs from the DMA buffer. If this happens,
2026 * the DMA buffer will be overwritten with new self IDs and we
2027 * will read out inconsistent data. The OHCI specification
2028 * (section 11.2) recommends a technique similar to
2029 * linux/seqlock.h, where we remember the generation of the
2030 * self IDs in the buffer before reading them out and compare
2031 * it to the current generation after reading them out. If
2032 * the two generations match we know we have a consistent set
2033 * of self IDs.
2036 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
2037 if (new_generation != generation) {
2038 ohci_notice(ohci, "new bus reset, discarding self ids\n");
2042 /* FIXME: Document how the locking works. */
2043 spin_lock_irq(&ohci->lock);
2045 ohci->generation = -1; /* prevent AT packet queueing */
2046 context_stop(&ohci->at_request_ctx);
2047 context_stop(&ohci->at_response_ctx);
2049 spin_unlock_irq(&ohci->lock);
2052 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
2053 * packets in the AT queues and software needs to drain them.
2054 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
2056 at_context_flush(&ohci->at_request_ctx);
2057 at_context_flush(&ohci->at_response_ctx);
2059 spin_lock_irq(&ohci->lock);
2061 ohci->generation = generation;
2062 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2064 if (ohci->quirks & QUIRK_RESET_PACKET)
2065 ohci->request_generation = generation;
2068 * This next bit is unrelated to the AT context stuff but we
2069 * have to do it under the spinlock also. If a new config rom
2070 * was set up before this reset, the old one is now no longer
2071 * in use and we can free it. Update the config rom pointers
2072 * to point to the current config rom and clear the
2073 * next_config_rom pointer so a new update can take place.
2076 if (ohci->next_config_rom != NULL) {
2077 if (ohci->next_config_rom != ohci->config_rom) {
2078 free_rom = ohci->config_rom;
2079 free_rom_bus = ohci->config_rom_bus;
2081 ohci->config_rom = ohci->next_config_rom;
2082 ohci->config_rom_bus = ohci->next_config_rom_bus;
2083 ohci->next_config_rom = NULL;
2086 * Restore config_rom image and manually update
2087 * config_rom registers. Writing the header quadlet
2088 * will indicate that the config rom is ready, so we
2091 reg_write(ohci, OHCI1394_BusOptions,
2092 be32_to_cpu(ohci->config_rom[2]));
2093 ohci->config_rom[0] = ohci->next_header;
2094 reg_write(ohci, OHCI1394_ConfigROMhdr,
2095 be32_to_cpu(ohci->next_header));
2098 if (param_remote_dma) {
2099 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
2100 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
2103 spin_unlock_irq(&ohci->lock);
2106 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, free_rom, free_rom_bus);
2108 log_selfids(ohci, generation, self_id_count);
2110 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
2111 self_id_count, ohci->self_id_buffer,
2112 ohci->csr_state_setclear_abdicate);
2113 ohci->csr_state_setclear_abdicate = false;
2116 static irqreturn_t irq_handler(int irq, void *data)
2118 struct fw_ohci *ohci = data;
2119 u32 event, iso_event;
2122 event = reg_read(ohci, OHCI1394_IntEventClear);
2124 if (!event || !~event)
2128 * busReset and postedWriteErr must not be cleared yet
2129 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
2131 reg_write(ohci, OHCI1394_IntEventClear,
2132 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
2133 log_irqs(ohci, event);
2135 if (event & OHCI1394_selfIDComplete)
2136 queue_work(selfid_workqueue, &ohci->bus_reset_work);
2138 if (event & OHCI1394_RQPkt)
2139 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
2141 if (event & OHCI1394_RSPkt)
2142 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
2144 if (event & OHCI1394_reqTxComplete)
2145 tasklet_schedule(&ohci->at_request_ctx.tasklet);
2147 if (event & OHCI1394_respTxComplete)
2148 tasklet_schedule(&ohci->at_response_ctx.tasklet);
2150 if (event & OHCI1394_isochRx) {
2151 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
2152 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
while (iso_event) {
	i = ffs(iso_event) - 1;
	tasklet_schedule(
		&ohci->ir_context_list[i].context.tasklet);
	iso_event &= ~(1 << i);
}
2162 if (event & OHCI1394_isochTx) {
2163 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
2164 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
while (iso_event) {
	i = ffs(iso_event) - 1;
	tasklet_schedule(
		&ohci->it_context_list[i].context.tasklet);
	iso_event &= ~(1 << i);
}
2174 if (unlikely(event & OHCI1394_regAccessFail))
2175 ohci_err(ohci, "register access failure\n");
2177 if (unlikely(event & OHCI1394_postedWriteErr)) {
2178 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
2179 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
2180 reg_write(ohci, OHCI1394_IntEventClear,
2181 OHCI1394_postedWriteErr);
2182 if (printk_ratelimit())
2183 ohci_err(ohci, "PCI posted write error\n");
2186 if (unlikely(event & OHCI1394_cycleTooLong)) {
2187 if (printk_ratelimit())
2188 ohci_notice(ohci, "isochronous cycle too long\n");
2189 reg_write(ohci, OHCI1394_LinkControlSet,
2190 OHCI1394_LinkControl_cycleMaster);
2193 if (unlikely(event & OHCI1394_cycleInconsistent)) {
2195 * We need to clear this event bit in order to make
2196 * cycleMatch isochronous I/O work. In theory we should
2197 * stop active cycleMatch iso contexts now and restart
2198 * them at least two cycles later. (FIXME?)
2200 if (printk_ratelimit())
2201 ohci_notice(ohci, "isochronous cycle inconsistent\n");
2204 if (unlikely(event & OHCI1394_unrecoverableError))
2205 handle_dead_contexts(ohci);
2207 if (event & OHCI1394_cycle64Seconds) {
2208 spin_lock(&ohci->lock);
2209 update_bus_time(ohci);
	spin_unlock(&ohci->lock);
}

return IRQ_HANDLED;
}
2217 static int software_reset(struct fw_ohci *ohci)
2222 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
2223 for (i = 0; i < 500; i++) {
	val = reg_read(ohci, OHCI1394_HCControlSet);
	if (!~val)
		return -ENODEV; /* Card was ejected. */

	if (!(val & OHCI1394_HCControl_softReset))
		return 0;

	msleep(1);
}

return -EBUSY;
}
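/*
 * Note (added): copy the given image into the DMA buffer and zero-pad
 * the remainder up to CONFIG_ROM_SIZE, so no stale data from a
 * previous, longer image is exposed on the bus.
 */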
2237 static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
2239 size_t size = length * 4;
2241 memcpy(dest, src, size);
2242 if (size < CONFIG_ROM_SIZE)
2243 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
2246 static int configure_1394a_enhancements(struct fw_ohci *ohci)
bool enable_1394a;
int ret, clear, set, offset;
2251 /* Check if the driver should configure link and PHY. */
2252 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
      OHCI1394_HCControl_programPhyEnable))
	return 0;
2256 /* Paranoia: check whether the PHY supports 1394a, too. */
2257 enable_1394a = false;
ret = read_phy_reg(ohci, 2);
if (ret < 0)
	return ret;
if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
	ret = read_paged_phy_reg(ohci, 1, 8);
	if (ret < 0)
		return ret;
	if (ret >= 1)
		enable_1394a = true;
}
2269 if (ohci->quirks & QUIRK_NO_1394A)
2270 enable_1394a = false;
/* Configure PHY and link consistently. */
if (enable_1394a) {
	clear = 0;
	set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
} else {
	clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	set = 0;
}
ret = update_phy_reg(ohci, 5, clear, set);
if (ret < 0)
	return ret;
if (enable_1394a)
	offset = OHCI1394_HCControlSet;
else
	offset = OHCI1394_HCControlClear;
2288 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2290 /* Clean up: configuration has been taken care of. */
2291 reg_write(ohci, OHCI1394_HCControlClear,
	  OHCI1394_HCControl_programPhyEnable);

return 0;
}
2297 static int probe_tsb41ba3d(struct fw_ohci *ohci)
2299 /* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
2300 static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
int reg, i;

reg = read_phy_reg(ohci, 2);
if (reg < 0)
	return reg;
if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
	return 0;

for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
	reg = read_paged_phy_reg(ohci, 1, i + 10);
	if (reg < 0)
		return reg;
	if (reg != id[i])
		return 0;
}

return 1;
}
2319 static int ohci_enable(struct fw_card *card,
2320 const __be32 *config_rom, size_t length)
2322 struct fw_ohci *ohci = fw_ohci(card);
u32 lps, version, irqs;
int i, ret;

ret = software_reset(ohci);
if (ret < 0) {
	ohci_err(ohci, "failed to reset ohci card\n");
	return ret;
}
2333 * Now enable LPS, which we need in order to start accessing
2334 * most of the registers. In fact, on some cards (ALI M5251),
2335 * accessing registers in the SClk domain without LPS enabled
 * will lock up the machine. Wait 50 msec to make sure we have
 * the link fully enabled. However, with some cards (well, at least
2338 * a JMicron PCIe card), we have to try again sometimes.
2340 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
 * cannot actually use the phy at that time. These need tens of
 * milliseconds of pause between the LPS write and the first phy
 * access, too.
2345 reg_write(ohci, OHCI1394_HCControlSet,
2346 OHCI1394_HCControl_LPS |
2347 OHCI1394_HCControl_postedWriteEnable);
for (lps = 0, i = 0; !lps && i < 3; i++) {
	msleep(50);
	lps = reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_LPS;
}
if (!lps) {
	ohci_err(ohci, "failed to set Link Power Status\n");
	return -EIO;
}
if (ohci->quirks & QUIRK_TI_SLLZ059) {
	ret = probe_tsb41ba3d(ohci);
	if (ret < 0)
		return ret;
	if (ret)
		ohci_notice(ohci, "local TSB41BA3D phy\n");
	else
		ohci->quirks &= ~QUIRK_TI_SLLZ059;
}
2371 reg_write(ohci, OHCI1394_HCControlClear,
2372 OHCI1394_HCControl_noByteSwapData);
2374 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2375 reg_write(ohci, OHCI1394_LinkControlSet,
2376 OHCI1394_LinkControl_cycleTimerEnable |
2377 OHCI1394_LinkControl_cycleMaster);
2379 reg_write(ohci, OHCI1394_ATRetries,
2380 OHCI1394_MAX_AT_REQ_RETRIES |
2381 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
	  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
	  (200 << 16));
2385 ohci->bus_time_running = false;
2387 for (i = 0; i < 32; i++)
2388 if (ohci->ir_context_support & (1 << i))
2389 reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
2390 IR_CONTEXT_MULTI_CHANNEL_MODE);
2392 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2393 if (version >= OHCI_VERSION_1_1) {
	reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
		  0xfffffffe);
	card->broadcast_channel_auto_allocated = true;
}
2399 /* Get implemented bits of the priority arbitration request counter. */
2400 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2401 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2402 reg_write(ohci, OHCI1394_FairnessControl, 0);
2403 card->priority_budget_implemented = ohci->pri_req_max != 0;
2405 reg_write(ohci, OHCI1394_PhyUpperBound, FW_MAX_PHYSICAL_RANGE >> 16);
2406 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2407 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
ret = configure_1394a_enhancements(ohci);
if (ret < 0)
	return ret;
/* Activate link_on bit and contender bit in our self ID packets. */
ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
if (ret < 0)
	return ret;
2419 * When the link is not yet enabled, the atomic config rom
2420 * update mechanism described below in ohci_set_config_rom()
2421 * is not active. We have to update ConfigRomHeader and
2422 * BusOptions manually, and the write to ConfigROMmap takes
2423 * effect immediately. We tie this to the enabling of the
2424 * link, so we have a valid config rom before enabling - the
2425 * OHCI requires that ConfigROMhdr and BusOptions have valid
2426 * values before enabling.
2428 * However, when the ConfigROMmap is written, some controllers
2429 * always read back quadlets 0 and 2 from the config rom to
2430 * the ConfigRomHeader and BusOptions registers on bus reset.
2431 * They shouldn't do that in this initial case where the link
2432 * isn't enabled. This means we have to use the same
 * workaround here, setting the bus header to 0 and then writing
 * the right values in the bus reset work item.
if (config_rom) {
	ohci->next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
						    &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	copy_config_rom(ohci->next_config_rom, config_rom, length);
} else {
	/*
	 * In the suspend case, config_rom is NULL, which
	 * means that we just reuse the old config rom.
	 */
	ohci->next_config_rom = ohci->config_rom;
	ohci->next_config_rom_bus = ohci->config_rom_bus;
}
2453 ohci->next_header = ohci->next_config_rom[0];
2454 ohci->next_config_rom[0] = 0;
2455 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
2456 reg_write(ohci, OHCI1394_BusOptions,
2457 be32_to_cpu(ohci->next_config_rom[2]));
2458 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2460 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2462 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2463 OHCI1394_RQPkt | OHCI1394_RSPkt |
2464 OHCI1394_isochTx | OHCI1394_isochRx |
2465 OHCI1394_postedWriteErr |
2466 OHCI1394_selfIDComplete |
2467 OHCI1394_regAccessFail |
2468 OHCI1394_cycleInconsistent |
2469 OHCI1394_unrecoverableError |
2470 OHCI1394_cycleTooLong |
2471 OHCI1394_masterIntEnable;
2472 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2473 irqs |= OHCI1394_busReset;
2474 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2476 reg_write(ohci, OHCI1394_HCControlSet,
2477 OHCI1394_HCControl_linkEnable |
2478 OHCI1394_HCControl_BIBimageValid);
2480 reg_write(ohci, OHCI1394_LinkControlSet,
2481 OHCI1394_LinkControl_rcvSelfID |
2482 OHCI1394_LinkControl_rcvPhyPkt);
2484 ar_context_run(&ohci->ar_request_ctx);
ar_context_run(&ohci->ar_response_ctx);

flush_writes(ohci);

/* We are ready to go, reset bus to finish initialization. */
fw_schedule_bus_reset(&ohci->card, false, true);

return 0;
}
2495 static int ohci_set_config_rom(struct fw_card *card,
2496 const __be32 *config_rom, size_t length)
2498 struct fw_ohci *ohci;
2499 __be32 *next_config_rom;
2500 dma_addr_t next_config_rom_bus;
2502 ohci = fw_ohci(card);
2505 * When the OHCI controller is enabled, the config rom update
2506 * mechanism is a bit tricky, but easy enough to use. See
2507 * section 5.5.6 in the OHCI specification.
2509 * The OHCI controller caches the new config rom address in a
2510 * shadow register (ConfigROMmapNext) and needs a bus reset
2511 * for the changes to take place. When the bus reset is
2512 * detected, the controller loads the new values for the
2513 * ConfigRomHeader and BusOptions registers from the specified
2514 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2515 * shadow register. All automatically and atomically.
2517 * Now, there's a twist to this story. The automatic load of
2518 * ConfigRomHeader and BusOptions doesn't honor the
2519 * noByteSwapData bit, so with a be32 config rom, the
 * controller will load be32 values into these registers
 * during the atomic update, even on little endian
 * architectures. The workaround we use is to put a 0 in the
 * header quadlet; 0 is endian agnostic and means that the
 * config rom isn't ready yet. In the bus reset work we
2525 * then set up the real values for the two registers.
2527 * We use ohci->lock to avoid racing with the code that sets
2528 * ohci->next_config_rom to NULL (see bus_reset_work).
2531 next_config_rom = dmam_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2532 &next_config_rom_bus, GFP_KERNEL);
if (next_config_rom == NULL)
	return -ENOMEM;
2536 spin_lock_irq(&ohci->lock);
2539 * If there is not an already pending config_rom update,
2540 * push our new allocation into the ohci->next_config_rom
2541 * and then mark the local variable as null so that we
2542 * won't deallocate the new buffer.
2544 * OTOH, if there is a pending config_rom update, just
2545 * use that buffer with the new config_rom data, and
2546 * let this routine free the unused DMA allocation.
2549 if (ohci->next_config_rom == NULL) {
2550 ohci->next_config_rom = next_config_rom;
2551 ohci->next_config_rom_bus = next_config_rom_bus;
2552 next_config_rom = NULL;
2555 copy_config_rom(ohci->next_config_rom, config_rom, length);
2557 ohci->next_header = config_rom[0];
2558 ohci->next_config_rom[0] = 0;
2560 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2562 spin_unlock_irq(&ohci->lock);
2564 /* If we didn't use the DMA allocation, delete it. */
2565 if (next_config_rom != NULL) {
2566 dmam_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, next_config_rom,
			   next_config_rom_bus);
}
2571 * Now initiate a bus reset to have the changes take
2572 * effect. We clean up the old config rom memory and DMA
 * mappings in the bus reset work, since the OHCI
 * controller could need to access it before the bus reset
 * takes effect.
 */

fw_schedule_bus_reset(&ohci->card, true, true);

return 0;
}
2583 static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2585 struct fw_ohci *ohci = fw_ohci(card);
2587 at_context_transmit(&ohci->at_request_ctx, packet);
2590 static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2592 struct fw_ohci *ohci = fw_ohci(card);
2594 at_context_transmit(&ohci->at_response_ctx, packet);
2597 static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2599 struct fw_ohci *ohci = fw_ohci(card);
2600 struct context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int ret = -ENOENT;

tasklet_disable_in_atomic(&ctx->tasklet);

if (packet->ack != 0)
	goto out;
2609 if (packet->payload_mapped)
2610 dma_unmap_single(ohci->card.device, packet->payload_bus,
2611 packet->payload_length, DMA_TO_DEVICE);
2613 log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
2614 driver_data->packet = NULL;
2615 packet->ack = RCODE_CANCELLED;
2617 // Timestamping on behalf of the hardware.
2618 packet->timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
packet->callback(packet, &ohci->card, packet->ack);
ret = 0;
out:
tasklet_enable(&ctx->tasklet);

return ret;
}
2628 static int ohci_enable_phys_dma(struct fw_card *card,
2629 int node_id, int generation)
2631 struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, ret = 0;

if (param_remote_dma)
	return 0;
2639 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2640 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2643 spin_lock_irqsave(&ohci->lock, flags);
if (ohci->generation != generation) {
	ret = -ESTALE;
	goto out;
}
2651 * Note, if the node ID contains a non-local bus ID, physical DMA is
2652 * enabled for _all_ nodes on remote buses.
2655 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
if (n < 32)
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
else
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

flush_writes(ohci);
out:
spin_unlock_irqrestore(&ohci->lock, flags);

return ret;
}
2668 static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
2670 struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
u32 value;
2674 switch (csr_offset) {
case CSR_STATE_CLEAR:
case CSR_STATE_SET:
	if (ohci->is_root &&
	    (reg_read(ohci, OHCI1394_LinkControlSet) &
	     OHCI1394_LinkControl_cycleMaster))
		value = CSR_STATE_BIT_CMSTR;
	else
		value = 0;
	if (ohci->csr_state_setclear_abdicate)
		value |= CSR_STATE_BIT_ABDICATE;

	return value;
case CSR_NODE_IDS:
	return reg_read(ohci, OHCI1394_NodeID) << 16;
2691 case CSR_CYCLE_TIME:
2692 return get_cycle_time(ohci);
case CSR_BUS_TIME:
	/*
	 * We might be called just after the cycle timer has wrapped
2697 * around but just before the cycle64Seconds handler, so we
2698 * better check here, too, if the bus time needs to be updated.
2700 spin_lock_irqsave(&ohci->lock, flags);
2701 value = update_bus_time(ohci);
	spin_unlock_irqrestore(&ohci->lock, flags);
	return value;
2705 case CSR_BUSY_TIMEOUT:
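	/*
	 * Note (added): undo the ATRetries packing below in
	 * ohci_write_csr(): pick out one retry field and the cycle
	 * limit, back in CSR BUSY_TIMEOUT layout.
	 */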
2706 value = reg_read(ohci, OHCI1394_ATRetries);
2707 return (value >> 4) & 0x0ffff00f;
2709 case CSR_PRIORITY_BUDGET:
2710 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
	       (ohci->pri_req_max << 8);

default:
	WARN_ON(1);
	return 0;
}
}
2719 static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
2721 struct fw_ohci *ohci = fw_ohci(card);
2722 unsigned long flags;
2724 switch (csr_offset) {
2725 case CSR_STATE_CLEAR:
2726 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2727 reg_write(ohci, OHCI1394_LinkControlClear,
			  OHCI1394_LinkControl_cycleMaster);
		flush_writes(ohci);
	}
	if (value & CSR_STATE_BIT_ABDICATE)
		ohci->csr_state_setclear_abdicate = false;
	break;
case CSR_STATE_SET:
	if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2737 reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
		flush_writes(ohci);
	}
	if (value & CSR_STATE_BIT_ABDICATE)
		ohci->csr_state_setclear_abdicate = true;
	break;
case CSR_NODE_IDS:
	reg_write(ohci, OHCI1394_NodeID, value >> 16);
	flush_writes(ohci);
	break;
2750 case CSR_CYCLE_TIME:
2751 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2752 reg_write(ohci, OHCI1394_IntEventSet,
		  OHCI1394_cycleInconsistent);
	flush_writes(ohci);
	break;
case CSR_BUS_TIME:
	spin_lock_irqsave(&ohci->lock, flags);
	ohci->bus_time = (update_bus_time(ohci) & 0x40) |
			 (value & ~0x7f);
	spin_unlock_irqrestore(&ohci->lock, flags);
	break;
2764 case CSR_BUSY_TIMEOUT:
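	/*
	 * Note (added): replicate the 4-bit retry limit into the three
	 * retry fields of ATRetries and shift the cycle limit up into
	 * bits 16 and above.
	 */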
2765 value = (value & 0xf) | ((value & 0xf) << 4) |
2766 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
	reg_write(ohci, OHCI1394_ATRetries, value);
	flush_writes(ohci);
	break;
2771 case CSR_PRIORITY_BUDGET:
	reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
	flush_writes(ohci);
	break;

default:
	WARN_ON(1);
	break;
}
}
2782 static void flush_iso_completions(struct iso_context *ctx)
2784 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
2785 ctx->header_length, ctx->header,
2786 ctx->base.callback_data);
2787 ctx->header_length = 0;
2790 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
u32 *ctx_hdr;

if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
	if (ctx->base.drop_overflow_headers)
		return;
	flush_iso_completions(ctx);
}
2800 ctx_hdr = ctx->header + ctx->header_length;
2801 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
2804 * The two iso header quadlets are byteswapped to little
2805 * endian by the controller, but we want to present them
2806 * as big endian for consistency with the bus endianness.
2808 if (ctx->base.header_size > 0)
2809 ctx_hdr[0] = swab32(dma_hdr[1]); /* iso packet header */
2810 if (ctx->base.header_size > 4)
2811 ctx_hdr[1] = swab32(dma_hdr[0]); /* timestamp */
2812 if (ctx->base.header_size > 8)
2813 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
2814 ctx->header_length += ctx->base.header_size;
2817 static int handle_ir_packet_per_buffer(struct context *context,
2818 struct descriptor *d,
2819 struct descriptor *last)
2821 struct iso_context *ctx =
2822 container_of(context, struct iso_context, context);
struct descriptor *pd;
u32 buffer_dma;
2826 for (pd = d; pd <= last; pd++)
	if (pd->transfer_status)
		break;
if (pd > last)
	/* Descriptor(s) not done yet, stop iteration */
	return 0;
while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
	d++;
	buffer_dma = le32_to_cpu(d->data_address);
	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      le16_to_cpu(d->req_count),
				      DMA_FROM_DEVICE);
}
2843 copy_iso_headers(ctx, (u32 *) (last + 1));
2845 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
	flush_iso_completions(ctx);

return 1;
}
2851 /* d == last because each descriptor block is only a single descriptor. */
2852 static int handle_ir_buffer_fill(struct context *context,
2853 struct descriptor *d,
2854 struct descriptor *last)
2856 struct iso_context *ctx =
2857 container_of(context, struct iso_context, context);
unsigned int req_count, res_count, completed;
u32 buffer_dma;
2861 req_count = le16_to_cpu(last->req_count);
2862 res_count = le16_to_cpu(READ_ONCE(last->res_count));
2863 completed = req_count - res_count;
2864 buffer_dma = le32_to_cpu(last->data_address);
2866 if (completed > 0) {
2867 ctx->mc_buffer_bus = buffer_dma;
	ctx->mc_completed = completed;
}

if (res_count != 0)
	/* Descriptor(s) not done yet, stop iteration */
	return 0;
2875 dma_sync_single_range_for_cpu(context->ohci->card.device,
2876 buffer_dma & PAGE_MASK,
2877 buffer_dma & ~PAGE_MASK,
2878 completed, DMA_FROM_DEVICE);
2880 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
2881 ctx->base.callback.mc(&ctx->base,
2882 buffer_dma + completed,
2883 ctx->base.callback_data);
	ctx->mc_completed = 0;
}

return 1;
}
2890 static void flush_ir_buffer_fill(struct iso_context *ctx)
2892 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
2893 ctx->mc_buffer_bus & PAGE_MASK,
2894 ctx->mc_buffer_bus & ~PAGE_MASK,
2895 ctx->mc_completed, DMA_FROM_DEVICE);
2897 ctx->base.callback.mc(&ctx->base,
2898 ctx->mc_buffer_bus + ctx->mc_completed,
2899 ctx->base.callback_data);
2900 ctx->mc_completed = 0;
2903 static inline void sync_it_packet_for_cpu(struct context *context,
2904 struct descriptor *pd)
__le16 control;
u32 buffer_dma;

/* only packets beginning with OUTPUT_MORE* have data buffers */
if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
	return;

/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
pd += 2;
2917 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
 * data buffer is in the context program's coherent page and must not
 * be synced.
 */
if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
    (context->current_bus & PAGE_MASK)) {
	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return;
	pd++;
}
do {
	buffer_dma = le32_to_cpu(pd->data_address);
	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      le16_to_cpu(pd->req_count),
				      DMA_TO_DEVICE);
	control = pd->control;
	pd++;
} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}
2940 static int handle_it_packet(struct context *context,
2941 struct descriptor *d,
2942 struct descriptor *last)
2944 struct iso_context *ctx =
2945 container_of(context, struct iso_context, context);
struct descriptor *pd;
__be32 *ctx_hdr;
2949 for (pd = d; pd <= last; pd++)
	if (pd->transfer_status)
		break;
if (pd > last)
	/* Descriptor(s) not done yet, stop iteration */
	return 0;
2956 sync_it_packet_for_cpu(context, d);
2958 if (ctx->header_length + 4 > PAGE_SIZE) {
	if (ctx->base.drop_overflow_headers)
		return 1;
	flush_iso_completions(ctx);
}
2964 ctx_hdr = ctx->header + ctx->header_length;
2965 ctx->last_timestamp = le16_to_cpu(last->res_count);
2966 /* Present this value as big-endian to match the receive code */
2967 *ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
2968 le16_to_cpu(pd->res_count));
2969 ctx->header_length += 4;
2971 if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
	flush_iso_completions(ctx);

return 1;
}
2977 static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2979 u32 hi = channels >> 32, lo = channels;
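/*
 * Note (added): clear the channel bits that are not wanted first,
 * then set the wanted ones, so the mask never transiently enables
 * channels outside the requested set.
 */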
2981 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2982 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2983 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2984 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2985 ohci->mc_channels = channels;
2988 static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
2989 int type, int channel, size_t header_size)
2991 struct fw_ohci *ohci = fw_ohci(card);
2992 struct iso_context *ctx;
descriptor_callback_t callback;
u64 *channels;
u32 *mask, regs;
int index, ret = -EBUSY;
spin_lock_irq(&ohci->lock);

switch (type) {
3001 case FW_ISO_CONTEXT_TRANSMIT:
3002 mask = &ohci->it_context_mask;
3003 callback = handle_it_packet;
	index = ffs(*mask) - 1;
	if (index >= 0) {
		*mask &= ~(1 << index);
		regs = OHCI1394_IsoXmitContextBase(index);
		ctx = &ohci->it_context_list[index];
	}
	break;
3012 case FW_ISO_CONTEXT_RECEIVE:
3013 channels = &ohci->ir_context_channels;
3014 mask = &ohci->ir_context_mask;
3015 callback = handle_ir_packet_per_buffer;
	index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
	if (index >= 0) {
		*channels &= ~(1ULL << channel);
		*mask &= ~(1 << index);
		regs = OHCI1394_IsoRcvContextBase(index);
		ctx = &ohci->ir_context_list[index];
	}
	break;
3025 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3026 mask = &ohci->ir_context_mask;
3027 callback = handle_ir_buffer_fill;
	index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
	if (index >= 0) {
		ohci->mc_allocated = true;
		*mask &= ~(1 << index);
		regs = OHCI1394_IsoRcvContextBase(index);
		ctx = &ohci->ir_context_list[index];
	}
	break;

default:
	index = -1;
	ret = -ENOSYS;
}
3042 spin_unlock_irq(&ohci->lock);
if (index < 0)
	return ERR_PTR(ret);
3047 memset(ctx, 0, sizeof(*ctx));
3048 ctx->header_length = 0;
3049 ctx->header = (void *) __get_free_page(GFP_KERNEL);
if (ctx->header == NULL) {
	ret = -ENOMEM;
	goto out;
}
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
	goto out_with_header;
3058 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
3059 set_multichannel_mask(ohci, 0);
	ctx->mc_completed = 0;
}

return &ctx->base;

out_with_header:
free_page((unsigned long)ctx->header);
out:
spin_lock_irq(&ohci->lock);
switch (type) {
case FW_ISO_CONTEXT_RECEIVE:
	*channels |= 1ULL << channel;
	break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
	ohci->mc_allocated = false;
	break;
}
*mask |= 1 << index;
3081 spin_unlock_irq(&ohci->lock);
3083 return ERR_PTR(ret);
3086 static int ohci_start_iso(struct fw_iso_context *base,
3087 s32 cycle, u32 sync, u32 tags)
3089 struct iso_context *ctx = container_of(base, struct iso_context, base);
3090 struct fw_ohci *ohci = ctx->context.ohci;
u32 control = IR_CONTEXT_ISOCH_HEADER, match;
int index;

/* the controller cannot start without any queued packets */
if (ctx->context.last->branch_address == 0)
	return -ENODATA;
3098 switch (ctx->base.type) {
3099 case FW_ISO_CONTEXT_TRANSMIT:
	index = ctx - ohci->it_context_list;
	match = 0;
	if (cycle >= 0)
		match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
3104 (cycle & 0x7fff) << 16;
3106 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
3107 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
	context_run(&ctx->context, match);
	break;
3111 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
	control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
	fallthrough;
3114 case FW_ISO_CONTEXT_RECEIVE:
3115 index = ctx - ohci->ir_context_list;
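	/*
	 * Note (added): ContextMatch layout: tag bits in 31..28, sync
	 * field in 11..8, channel number in 5..0.
	 */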
3116 match = (tags << 28) | (sync << 8) | ctx->base.channel;
	if (cycle >= 0) {
		match |= (cycle & 0x07fff) << 12;
		control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
	}
3122 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
3123 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
3124 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
	context_run(&ctx->context, control);

	ctx->sync = sync;
	ctx->tags = tags;
	break;
}

return 0;
}
3136 static int ohci_stop_iso(struct fw_iso_context *base)
3138 struct fw_ohci *ohci = fw_ohci(base->card);
3139 struct iso_context *ctx = container_of(base, struct iso_context, base);
3142 switch (ctx->base.type) {
3143 case FW_ISO_CONTEXT_TRANSMIT:
3144 index = ctx - ohci->it_context_list;
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
	break;
3148 case FW_ISO_CONTEXT_RECEIVE:
3149 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3150 index = ctx - ohci->ir_context_list;
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
	break;
}
3155 context_stop(&ctx->context);
tasklet_kill(&ctx->context.tasklet);

return 0;
}
3161 static void ohci_free_iso_context(struct fw_iso_context *base)
3163 struct fw_ohci *ohci = fw_ohci(base->card);
3164 struct iso_context *ctx = container_of(base, struct iso_context, base);
3165 unsigned long flags;
3168 ohci_stop_iso(base);
3169 context_release(&ctx->context);
3170 free_page((unsigned long)ctx->header);
3172 spin_lock_irqsave(&ohci->lock, flags);
3174 switch (base->type) {
3175 case FW_ISO_CONTEXT_TRANSMIT:
3176 index = ctx - ohci->it_context_list;
	ohci->it_context_mask |= 1 << index;
	break;
3180 case FW_ISO_CONTEXT_RECEIVE:
3181 index = ctx - ohci->ir_context_list;
3182 ohci->ir_context_mask |= 1 << index;
	ohci->ir_context_channels |= 1ULL << base->channel;
	break;
3186 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3187 index = ctx - ohci->ir_context_list;
3188 ohci->ir_context_mask |= 1 << index;
3189 ohci->ir_context_channels |= ohci->mc_channels;
3190 ohci->mc_channels = 0;
	ohci->mc_allocated = false;
	break;
}
3195 spin_unlock_irqrestore(&ohci->lock, flags);
3198 static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
3200 struct fw_ohci *ohci = fw_ohci(base->card);
unsigned long flags;
int ret;
3204 switch (base->type) {
3205 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3207 spin_lock_irqsave(&ohci->lock, flags);
3209 /* Don't allow multichannel to grab other contexts' channels. */
	if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
		*channels = ohci->ir_context_channels;
		ret = -EBUSY;
	} else {
		set_multichannel_mask(ohci, *channels);
		ret = 0;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	break;
default:
	ret = -EINVAL;
}

return ret;
}
3229 static void ohci_resume_iso_dma(struct fw_ohci *ohci)
int i;
struct iso_context *ctx;
3234 for (i = 0 ; i < ohci->n_ir ; i++) {
3235 ctx = &ohci->ir_context_list[i];
3236 if (ctx->context.running)
3237 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3240 for (i = 0 ; i < ohci->n_it ; i++) {
3241 ctx = &ohci->it_context_list[i];
3242 if (ctx->context.running)
3243 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
3248 static int queue_iso_transmit(struct iso_context *ctx,
3249 struct fw_iso_packet *packet,
3250 struct fw_iso_buffer *buffer,
3251 unsigned long payload)
3253 struct descriptor *d, *last, *pd;
struct fw_iso_packet *p;
__le32 *header;
dma_addr_t d_bus, page_bus;
3257 u32 z, header_z, payload_z, irq;
3258 u32 payload_index, payload_end_index, next_page_index;
3259 int page, end_page, i, length, offset;
p = packet;
payload_index = payload;

if (p->skip)
	z = 1;
else
	z = 2;

if (p->header_length > 0)
	z++;
3271 /* Determine the first page the payload isn't contained in. */
3272 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
3273 if (p->payload_length > 0)
	payload_z = end_page - (payload_index >> PAGE_SHIFT);
else
	payload_z = 0;

z += payload_z;
3280 /* Get header size in number of descriptors. */
3281 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
if (d == NULL)
	return -ENOMEM;
if (!p->skip) {
	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].req_count = cpu_to_le16(8);
3291 * Link the skip address to this descriptor itself. This causes
3292 * a context to skip a cycle whenever lost cycles or FIFO
3293 * overruns occur, without dropping the data. The application
3294 * should then decide whether this is an error condition or not.
3295 * FIXME: Make the context's cycle-lost behaviour configurable?
3297 d[0].branch_address = cpu_to_le32(d_bus | z);
3299 header = (__le32 *) &d[1];
3300 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
3301 IT_HEADER_TAG(p->tag) |
3302 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
3303 IT_HEADER_CHANNEL(ctx->base.channel) |
3304 IT_HEADER_SPEED(ctx->base.speed));
	header[1] = cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
						      p->payload_length));
}
3310 if (p->header_length > 0) {
3311 d[2].req_count = cpu_to_le16(p->header_length);
3312 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
	memcpy(&d[z], p->header, p->header_length);
}
3316 pd = d + z - payload_z;
3317 payload_end_index = payload_index + p->payload_length;
3318 for (i = 0; i < payload_z; i++) {
3319 page = payload_index >> PAGE_SHIFT;
3320 offset = payload_index & ~PAGE_MASK;
3321 next_page_index = (page + 1) << PAGE_SHIFT;
	length =
		min(next_page_index, payload_end_index) - payload_index;
3324 pd[i].req_count = cpu_to_le16(length);
3326 page_bus = page_private(buffer->pages[page]);
3327 pd[i].data_address = cpu_to_le32(page_bus + offset);
3329 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
					 page_bus, offset, length,
					 DMA_TO_DEVICE);

	payload_index += length;
}
if (p->interrupt)
	irq = DESCRIPTOR_IRQ_ALWAYS;
else
	irq = DESCRIPTOR_NO_IRQ;
3341 last = z == 2 ? d : d + z - 1;
3342 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
			     DESCRIPTOR_STATUS |
			     DESCRIPTOR_BRANCH_ALWAYS |
			     irq);
context_append(&ctx->context, d, z, header_z);

return 0;
}
3352 static int queue_iso_packet_per_buffer(struct iso_context *ctx,
3353 struct fw_iso_packet *packet,
3354 struct fw_iso_buffer *buffer,
3355 unsigned long payload)
3357 struct device *device = ctx->context.ohci->card.device;
3358 struct descriptor *d, *pd;
3359 dma_addr_t d_bus, page_bus;
u32 z, header_z, rest;
int i, j, length;
3362 int page, offset, packet_count, header_size, payload_per_buffer;
3365 * The OHCI controller puts the isochronous header and trailer in the
3366 * buffer, so we need at least 8 bytes.
3368 packet_count = packet->header_length / ctx->base.header_size;
3369 header_size = max(ctx->base.header_size, (size_t)8);
3371 /* Get header size in number of descriptors. */
3372 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
3373 page = payload >> PAGE_SHIFT;
3374 offset = payload & ~PAGE_MASK;
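/* Note (added): the payload is split evenly over packet_count blocks. */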
3375 payload_per_buffer = packet->payload_length / packet_count;
3377 for (i = 0; i < packet_count; i++) {
3378 /* d points to the header descriptor */
3379 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
3380 d = context_get_descriptors(&ctx->context,
			z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;
3385 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
3386 DESCRIPTOR_INPUT_MORE);
3387 if (packet->skip && i == 0)
3388 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3389 d->req_count = cpu_to_le16(header_size);
3390 d->res_count = d->req_count;
3391 d->transfer_status = 0;
3392 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
	rest = payload_per_buffer;
	pd = d;

	for (j = 1; j < z; j++) {
		pd++;
3398 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3399 DESCRIPTOR_INPUT_MORE);
		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
3405 pd->req_count = cpu_to_le16(length);
3406 pd->res_count = pd->req_count;
3407 pd->transfer_status = 0;
3409 page_bus = page_private(buffer->pages[page]);
3410 pd->data_address = cpu_to_le32(page_bus + offset);
		dma_sync_single_range_for_device(device, page_bus,
						 offset, length,
						 DMA_FROM_DEVICE);

		offset = (offset + length) & ~PAGE_MASK;
		rest -= length;
		if (offset == 0)
			page++;
	}
3421 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3422 DESCRIPTOR_INPUT_LAST |
3423 DESCRIPTOR_BRANCH_ALWAYS);
3424 if (packet->interrupt && i == packet_count - 1)
3425 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
	context_append(&ctx->context, d, z, header_z);
}

return 0;
}
3433 static int queue_iso_buffer_fill(struct iso_context *ctx,
3434 struct fw_iso_packet *packet,
3435 struct fw_iso_buffer *buffer,
3436 unsigned long payload)
3438 struct descriptor *d;
3439 dma_addr_t d_bus, page_bus;
3440 int page, offset, rest, z, i, length;
3442 page = payload >> PAGE_SHIFT;
3443 offset = payload & ~PAGE_MASK;
3444 rest = packet->payload_length;
3446 /* We need one descriptor for each page in the buffer. */
3447 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3449 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3452 for (i = 0; i < z; i++) {
	d = context_get_descriptors(&ctx->context, 1, &d_bus);
	if (d == NULL)
		return -ENOMEM;
3457 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3458 DESCRIPTOR_BRANCH_ALWAYS);
3459 if (packet->skip && i == 0)
3460 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3461 if (packet->interrupt && i == z - 1)
3462 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
	if (offset + rest < PAGE_SIZE)
		length = rest;
	else
		length = PAGE_SIZE - offset;
3468 d->req_count = cpu_to_le16(length);
3469 d->res_count = d->req_count;
3470 d->transfer_status = 0;
3472 page_bus = page_private(buffer->pages[page]);
3473 d->data_address = cpu_to_le32(page_bus + offset);
3475 dma_sync_single_range_for_device(ctx->context.ohci->card.device,
					 page_bus, offset, length,
					 DMA_FROM_DEVICE);

	rest -= length;
	offset = 0;
	page++;

	context_append(&ctx->context, d, 1, 0);
}

return 0;
}
3489 static int ohci_queue_iso(struct fw_iso_context *base,
3490 struct fw_iso_packet *packet,
3491 struct fw_iso_buffer *buffer,
3492 unsigned long payload)
3494 struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
int ret = -ENOSYS;
3498 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
3499 switch (base->type) {
3500 case FW_ISO_CONTEXT_TRANSMIT:
	ret = queue_iso_transmit(ctx, packet, buffer, payload);
	break;
3503 case FW_ISO_CONTEXT_RECEIVE:
	ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
	break;
3506 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
	ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
	break;
}
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

return ret;
}
3515 static void ohci_flush_queue_iso(struct fw_iso_context *base)
3517 struct context *ctx =
3518 &container_of(base, struct iso_context, base)->context;
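	/*
	 * Note (added): setting the wake bit makes the controller
	 * re-check the branch address of the current descriptor, so it
	 * picks up freshly appended descriptor blocks.
	 */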
3520 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
3523 static int ohci_flush_iso_completions(struct fw_iso_context *base)
struct iso_context *ctx = container_of(base, struct iso_context, base);
int ret = 0;
3528 tasklet_disable_in_atomic(&ctx->context.tasklet);
3530 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
3531 context_tasklet((unsigned long)&ctx->context);
3533 switch (base->type) {
3534 case FW_ISO_CONTEXT_TRANSMIT:
3535 case FW_ISO_CONTEXT_RECEIVE:
3536 if (ctx->header_length != 0)
3537 flush_iso_completions(ctx);
3539 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		if (ctx->mc_completed != 0)
			flush_ir_buffer_fill(ctx);
		break;
	default:
		ret = -ENOSYS;
	}
3547 clear_bit_unlock(0, &ctx->flushing_completions);
	smp_mb__after_atomic();
}

tasklet_enable(&ctx->context.tasklet);

return ret;
}
3556 static const struct fw_card_driver ohci_driver = {
3557 .enable = ohci_enable,
3558 .read_phy_reg = ohci_read_phy_reg,
3559 .update_phy_reg = ohci_update_phy_reg,
3560 .set_config_rom = ohci_set_config_rom,
3561 .send_request = ohci_send_request,
3562 .send_response = ohci_send_response,
3563 .cancel_packet = ohci_cancel_packet,
3564 .enable_phys_dma = ohci_enable_phys_dma,
3565 .read_csr = ohci_read_csr,
3566 .write_csr = ohci_write_csr,
3568 .allocate_iso_context = ohci_allocate_iso_context,
3569 .free_iso_context = ohci_free_iso_context,
3570 .set_iso_channels = ohci_set_iso_channels,
3571 .queue_iso = ohci_queue_iso,
3572 .flush_queue_iso = ohci_flush_queue_iso,
3573 .flush_iso_completions = ohci_flush_iso_completions,
3574 .start_iso = ohci_start_iso,
3575 .stop_iso = ohci_stop_iso,
3578 #ifdef CONFIG_PPC_PMAC
3579 static void pmac_ohci_on(struct pci_dev *dev)
3581 if (machine_is(powermac)) {
3582 struct device_node *ofn = pci_device_to_OF_node(dev);
		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}
3591 static void pmac_ohci_off(struct pci_dev *dev)
3593 if (machine_is(powermac)) {
3594 struct device_node *ofn = pci_device_to_OF_node(dev);
		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
3604 static inline void pmac_ohci_off(struct pci_dev *dev) {}
3605 #endif /* CONFIG_PPC_PMAC */
3607 static void release_ohci(struct device *dev, void *data)
3609 struct pci_dev *pdev = to_pci_dev(dev);
3610 struct fw_ohci *ohci = pci_get_drvdata(pdev);
3612 pmac_ohci_off(pdev);
3614 ar_context_release(&ohci->ar_response_ctx);
3615 ar_context_release(&ohci->ar_request_ctx);
3617 dev_notice(dev, "removed fw-ohci device\n");
3620 static int pci_probe(struct pci_dev *dev,
3621 const struct pci_device_id *ent)
3623 struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
int i, err;
size_t size;
3629 if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
	dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
	return -ENOSYS;
}
ohci = devres_alloc(release_ohci, sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL)
	return -ENOMEM;
3637 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3638 pci_set_drvdata(dev, ohci);
3640 devres_add(&dev->dev, ohci);
3642 err = pcim_enable_device(dev);
if (err) {
	dev_err(&dev->dev, "failed to enable OHCI hardware\n");
	return err;
}
3648 pci_set_master(dev);
3649 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3651 spin_lock_init(&ohci->lock);
3652 mutex_init(&ohci->phy_reg_mutex);
3654 INIT_WORK(&ohci->bus_reset_work, bus_reset_work);
3656 if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
3657 pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
	ohci_err(ohci, "invalid MMIO resource\n");
	return -ENXIO;
}
3662 err = pcim_iomap_regions(dev, 1 << 0, ohci_driver_name);
if (err) {
	ohci_err(ohci, "request and map MMIO resource unavailable\n");
	return -ENXIO;
}
3667 ohci->registers = pcim_iomap_table(dev)[0];
3669 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
3670 if ((ohci_quirks[i].vendor == dev->vendor) &&
3671 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3672 ohci_quirks[i].device == dev->device) &&
3673 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3674 ohci_quirks[i].revision >= dev->revision)) {
		ohci->quirks = ohci_quirks[i].flags;
		break;
	}
if (param_quirks)
	ohci->quirks = param_quirks;
3681 if (detect_vt630x_with_asm1083_on_amd_ryzen_machine(dev))
3682 ohci->quirks |= QUIRK_REBOOT_BY_CYCLE_TIMER_READ;
3685 * Because dma_alloc_coherent() allocates at least one page,
3686 * we save space by using a common buffer for the AR request/
3687 * response descriptors and the self IDs buffer.
3689 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3690 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
ohci->misc_buffer = dmam_alloc_coherent(&dev->dev, PAGE_SIZE, &ohci->misc_buffer_bus,
					GFP_KERNEL);
if (!ohci->misc_buffer)
	return -ENOMEM;
3696 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
		      OHCI1394_AsReqRcvContextControlSet);
if (err < 0)
	return err;
3701 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
		      OHCI1394_AsRspRcvContextControlSet);
if (err < 0)
	return err;
3706 err = context_init(&ohci->at_request_ctx, ohci,
		   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
if (err < 0)
	return err;
3711 err = context_init(&ohci->at_response_ctx, ohci,
		   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
if (err < 0)
	return err;
3716 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
3717 ohci->ir_context_channels = ~0ULL;
3718 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
3719 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
3720 ohci->ir_context_mask = ohci->ir_context_support;
3721 ohci->n_ir = hweight32(ohci->ir_context_mask);
3722 size = sizeof(struct iso_context) * ohci->n_ir;
3723 ohci->ir_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
if (!ohci->ir_context_list)
	return -ENOMEM;
3727 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
3728 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
3729 /* JMicron JMB38x often shows 0 at first read, just ignore it */
3730 if (!ohci->it_context_support) {
3731 ohci_notice(ohci, "overriding IsoXmitIntMask\n");
3732 ohci->it_context_support = 0xf;
3734 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
3735 ohci->it_context_mask = ohci->it_context_support;
3736 ohci->n_it = hweight32(ohci->it_context_mask);
3737 size = sizeof(struct iso_context) * ohci->n_it;
3738 ohci->it_context_list = devm_kzalloc(&dev->dev, size, GFP_KERNEL);
if (!ohci->it_context_list)
	return -ENOMEM;
3742 ohci->self_id = ohci->misc_buffer + PAGE_SIZE/2;
3743 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
3745 bus_options = reg_read(ohci, OHCI1394_BusOptions);
3746 max_receive = (bus_options >> 12) & 0xf;
3747 link_speed = bus_options & 0x7;
3748 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3749 reg_read(ohci, OHCI1394_GUIDLo);
3751 if (!(ohci->quirks & QUIRK_NO_MSI))
3752 pci_enable_msi(dev);
3753 err = devm_request_irq(&dev->dev, dev->irq, irq_handler,
3754 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, ohci_driver_name, ohci);
if (err < 0) {
	ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
	goto fail_msi;
}
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err)
	goto fail_irq;
3764 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3766 "added OHCI v%x.%x device as card %d, "
3767 "%d IR + %d IT contexts, quirks 0x%x%s\n",
3768 version >> 16, version & 0xff, ohci->card.index,
3769 ohci->n_ir, ohci->n_it, ohci->quirks,
	    reg_read(ohci, OHCI1394_PhyUpperBound) ?
		", physUB" : "");

return 0;

fail_irq:
devm_free_irq(&dev->dev, dev->irq, ohci);
fail_msi:
pci_disable_msi(dev);

return err;
}
3782 static void pci_remove(struct pci_dev *dev)
3784 struct fw_ohci *ohci = pci_get_drvdata(dev);
3787 * If the removal is happening from the suspend state, LPS won't be
3788 * enabled and host registers (eg., IntMaskClear) won't be accessible.
3790 if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	flush_writes(ohci);
}
3794 cancel_work_sync(&ohci->bus_reset_work);
3795 fw_core_remove_card(&ohci->card);
3798 * FIXME: Fail all pending packets here, now that the upper
3799 * layers can't queue any more.
3802 software_reset(ohci);
3804 devm_free_irq(&dev->dev, dev->irq, ohci);
3805 pci_disable_msi(dev);
3807 dev_notice(&dev->dev, "removing fw-ohci device\n");
3811 static int pci_suspend(struct pci_dev *dev, pm_message_t state)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;
3816 software_reset(ohci);
3817 err = pci_save_state(dev);
if (err) {
	ohci_err(ohci, "pci_save_state failed\n");
	return err;
}
3822 err = pci_set_power_state(dev, pci_choose_state(dev, state));
if (err)
	ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
pmac_ohci_off(dev);

return 0;
}
3830 static int pci_resume(struct pci_dev *dev)
struct fw_ohci *ohci = pci_get_drvdata(dev);
int err;

pmac_ohci_on(dev);
3836 pci_set_power_state(dev, PCI_D0);
3837 pci_restore_state(dev);
3838 err = pci_enable_device(dev);
if (err) {
	ohci_err(ohci, "pci_enable_device failed\n");
	return err;
}
3844 /* Some systems don't setup GUID register on resume from ram */
3845 if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3846 !reg_read(ohci, OHCI1394_GUIDHi)) {
3847 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3848 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
err = ohci_enable(&ohci->card, NULL, 0);
if (err)
	return err;

ohci_resume_iso_dma(ohci);

return 0;
}
3861 static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
3866 MODULE_DEVICE_TABLE(pci, pci_table);
3868 static struct pci_driver fw_ohci_pci_driver = {
3869 .name = ohci_driver_name,
.id_table = pci_table,
.probe = pci_probe,
.remove = pci_remove,
#ifdef CONFIG_PM
.resume = pci_resume,
.suspend = pci_suspend,
#endif
};
3879 static int __init fw_ohci_init(void)
3881 selfid_workqueue = alloc_workqueue(KBUILD_MODNAME, WQ_MEM_RECLAIM, 0);
if (!selfid_workqueue)
	return -ENOMEM;
3885 return pci_register_driver(&fw_ohci_pci_driver);
3888 static void __exit fw_ohci_cleanup(void)
3890 pci_unregister_driver(&fw_ohci_pci_driver);
3891 destroy_workqueue(selfid_workqueue);
3894 module_init(fw_ohci_init);
3895 module_exit(fw_ohci_cleanup);
3897 MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3898 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3899 MODULE_LICENSE("GPL");
3901 /* Provide a module alias so root-on-sbp2 initrds don't break. */
3902 MODULE_ALIAS("ohci1394");