// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2005-2007 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/crc-itu-t.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"
#include <trace/events/firewire.h>

#define define_fw_printk_level(func, kern_level)		\
void func(const struct fw_card *card, const char *fmt, ...)	\
{								\
	struct va_format vaf;					\
	va_list args;						\
								\
	va_start(args, fmt);					\
	vaf.fmt = fmt;						\
	vaf.va = &args;						\
	printk(kern_level KBUILD_MODNAME " %s: %pV",		\
	       dev_name(card->device), &vaf);			\
	va_end(args);						\
}
define_fw_printk_level(fw_err, KERN_ERR);
define_fw_printk_level(fw_notice, KERN_NOTICE);
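
/*
 * Usage sketch for the helpers generated above (the messages are made up for
 * illustration; only the calling convention comes from the macro):
 *
 *	fw_notice(card, "phy config: new root=%x, gap_count=%d\n", root, gap);
 *	fw_err(card, "failed to read config ROM\n");
 *
 * Both expand to printk() calls prefixed with KBUILD_MODNAME and the device
 * name of the card.
 */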

int fw_compute_block_crc(__be32 *block)
{
	int length;
	u16 crc;

	length = (be32_to_cpu(block[0]) >> 16) & 0xff;
	crc = crc_itu_t(0, (u8 *)&block[1], length * 4);
	*block |= cpu_to_be32(crc);

	return length;
}
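
/*
 * Worked example (sketch): for a block whose header quadlet is 0x0004xxxx,
 * bits 23:16 give a body length of 4 quadlets.  crc_itu_t() runs over those
 * 16 bytes, the resulting CRC is OR'ed into the low 16 bits of the header,
 * and 4 is returned so the caller can step to the next block.
 */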

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static LIST_HEAD(descriptor_list);
static int descriptor_count;

static __be32 tmp_config_rom[256];
/* ROM header, bus info block, root dir header, capabilities = 7 quadlets */
static size_t config_rom_length = 1 + 4 + 1 + 1;

#define BIB_CRC(v)		((v) <<  0)
#define BIB_CRC_LENGTH(v)	((v) << 16)
#define BIB_INFO_LENGTH(v)	((v) << 24)
#define BIB_BUS_NAME		0x31333934 /* "1394" */
#define BIB_LINK_SPEED(v)	((v) <<  0)
#define BIB_GENERATION(v)	((v) <<  4)
#define BIB_MAX_ROM(v)		((v) <<  8)
#define BIB_MAX_RECEIVE(v)	((v) << 12)
#define BIB_CYC_CLK_ACC(v)	((v) << 16)
#define BIB_PMC			((1) << 27)
#define BIB_BMC			((1) << 28)
#define BIB_ISC			((1) << 29)
#define BIB_CMC			((1) << 30)
#define BIB_IRMC		((1) << 31)
#define NODE_CAPABILITIES	0x0c0083c0 /* per IEEE 1394 clause 8.3.2.6.5.2 */
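
/*
 * Worked example (sketch, with assumed values): on the first ROM generation
 * for a card with link_speed = 2 (S400) and max_receive = 10,
 * generate_config_rom() below assembles quadlet 2 of the bus info block as
 *
 *	BIB_LINK_SPEED(2) | BIB_GENERATION(2) | BIB_MAX_ROM(2) |
 *	BIB_MAX_RECEIVE(10) | BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC
 *	  = 0x2 | 0x20 | 0x200 | 0xa000 | 0xf0000000 = 0xf000a222
 */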

/*
 * IEEE-1394 specifies a default SPLIT_TIMEOUT value of 800 cycles (100 ms),
 * but we have to make it longer because there are many devices whose firmware
 * is just too slow for that.
 */
#define DEFAULT_SPLIT_TIMEOUT	(2 * 8000)

#define CANON_OUI		0x000085

static void generate_config_rom(struct fw_card *card, __be32 *config_rom)
{
	struct fw_descriptor *desc;
	int i, j, k, length;

	/*
	 * Initialize contents of config rom buffer.  On the OHCI
	 * controller, block reads of the config rom access host
	 * memory, but quadlet reads access the hardware bus info block
	 * registers.  That's just crack, but it means we should make
	 * sure the contents of the bus info block in host memory match
	 * the version stored in the OHCI registers.
	 */

	config_rom[0] = cpu_to_be32(
		BIB_CRC_LENGTH(4) | BIB_INFO_LENGTH(4) | BIB_CRC(0));
	config_rom[1] = cpu_to_be32(BIB_BUS_NAME);
	config_rom[2] = cpu_to_be32(
		BIB_LINK_SPEED(card->link_speed) |
		BIB_GENERATION(card->config_rom_generation++ % 14 + 2) |
		BIB_MAX_ROM(2) |
		BIB_MAX_RECEIVE(card->max_receive) |
		BIB_BMC | BIB_ISC | BIB_CMC | BIB_IRMC);
	config_rom[3] = cpu_to_be32(card->guid >> 32);
	config_rom[4] = cpu_to_be32(card->guid);

	/* Generate root directory. */
	config_rom[6] = cpu_to_be32(NODE_CAPABILITIES);
	i = 7;
	j = 7 + descriptor_count;

	/* Generate root directory entries for descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		if (desc->immediate > 0)
			config_rom[i++] = cpu_to_be32(desc->immediate);
		config_rom[i] = cpu_to_be32(desc->key | (j - i));
		i++;
		j += desc->length;
	}

	/* Update root directory length. */
	config_rom[5] = cpu_to_be32((i - 5 - 1) << 16);

	/* End of root directory, now copy in descriptors. */
	list_for_each_entry (desc, &descriptor_list, link) {
		for (k = 0; k < desc->length; k++)
			config_rom[i + k] = cpu_to_be32(desc->data[k]);
		i += desc->length;
	}

	/*
	 * Calculate CRCs for all blocks in the config rom.  This
	 * assumes that CRC length and info length are identical for
	 * the bus info block, which is always the case for this
	 * implementation.
	 */
	for (i = 0; i < j; i += length + 1)
		length = fw_compute_block_crc(config_rom + i);

	WARN_ON(j != config_rom_length);
}

static void update_config_roms(void)
{
	struct fw_card *card;

	list_for_each_entry (card, &card_list, link) {
		generate_config_rom(card, tmp_config_rom);
		card->driver->set_config_rom(card, tmp_config_rom,
					     config_rom_length);
	}
}

static size_t required_space(struct fw_descriptor *desc)
{
	/* descriptor + entry into root dir + optional immediate entry */
	return desc->length + 1 + (desc->immediate > 0 ? 1 : 0);
}
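
/*
 * Example (sketch): a descriptor whose block is 5 quadlets long and which
 * also carries an immediate entry occupies 5 + 1 + 1 = 7 quadlets of the
 * 256-quadlet config ROM budget checked in fw_core_add_descriptor() below.
 */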

int fw_core_add_descriptor(struct fw_descriptor *desc)
{
	size_t i;
	int ret;

	/*
	 * Check the descriptor is valid; the lengths of all blocks in the
	 * descriptor have to add up to exactly the length of the
	 * descriptor data.
	 */
	i = 0;
	while (i < desc->length)
		i += (desc->data[i] >> 16) + 1;

	if (i != desc->length)
		return -EINVAL;

	mutex_lock(&card_mutex);

	if (config_rom_length + required_space(desc) > 256) {
		ret = -EBUSY;
	} else {
		list_add_tail(&desc->link, &descriptor_list);
		config_rom_length += required_space(desc);
		descriptor_count++;
		if (desc->immediate > 0)
			descriptor_count++;
		update_config_roms();
		ret = 0;
	}

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_descriptor);
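
/*
 * Illustrative caller sketch (not part of this file; the directory contents
 * and entry values are placeholders): a protocol driver publishes a unit
 * directory in the local config ROM roughly like this.
 *
 *	static const u32 example_unit_data[] = {
 *		0x00010000,	// directory header: one following quadlet
 *		0x12000abc,	// placeholder specifier ID entry
 *	};
 *	static struct fw_descriptor example_unit_directory = {
 *		.length = ARRAY_SIZE(example_unit_data),
 *		.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
 *		.data   = example_unit_data,
 *	};
 *
 *	err = fw_core_add_descriptor(&example_unit_directory);
 *	...
 *	fw_core_remove_descriptor(&example_unit_directory);
 */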

void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
	mutex_lock(&card_mutex);

	list_del(&desc->link);
	config_rom_length -= required_space(desc);
	descriptor_count--;
	if (desc->immediate > 0)
		descriptor_count--;
	update_config_roms();

	mutex_unlock(&card_mutex);
}
EXPORT_SYMBOL(fw_core_remove_descriptor);

static int reset_bus(struct fw_card *card, bool short_reset)
{
	int reg = short_reset ? 5 : 1;
	int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

	trace_bus_reset_initiate(card->index, card->generation, short_reset);

	return card->driver->update_phy_reg(card, reg, 0, bit);
}

void fw_schedule_bus_reset(struct fw_card *card, bool delayed, bool short_reset)
{
	trace_bus_reset_schedule(card->index, card->generation, short_reset);

	/* We don't try hard to sort out requests of long vs. short resets. */
	card->br_short = short_reset;

	/* Use an arbitrary short delay to combine multiple reset requests. */
	fw_card_get(card);
	if (!queue_delayed_work(fw_workqueue, &card->br_work,
				delayed ? DIV_ROUND_UP(HZ, 100) : 0))
		fw_card_put(card);
}
EXPORT_SYMBOL(fw_schedule_bus_reset);
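
/*
 * Caller sketch (hedged): upper layers request a bus reset asynchronously and
 * never block on it, e.g.:
 *
 *	fw_schedule_bus_reset(card, true, true);	// delayed short reset
 *	fw_schedule_bus_reset(card, false, false);	// immediate long reset
 */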

static void br_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, br_work.work);

	/* Delay for 2s after last reset per IEEE 1394 clause 8.2.1. */
	if (card->reset_jiffies != 0 &&
	    time_before64(get_jiffies_64(), card->reset_jiffies + 2 * HZ)) {
		trace_bus_reset_postpone(card->index, card->generation, card->br_short);

		if (!queue_delayed_work(fw_workqueue, &card->br_work, 2 * HZ))
			fw_card_put(card);
		return;
	}

	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, card->generation,
			   FW_PHY_CONFIG_CURRENT_GAP_COUNT);
	reset_bus(card, card->br_short);
	fw_card_put(card);
}

static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
	int channel, bandwidth = 0;

	if (!card->broadcast_channel_allocated) {
		fw_iso_resource_manage(card, generation, 1ULL << 31,
				       &channel, &bandwidth, true);
		if (channel != 31) {
			fw_notice(card, "failed to allocate broadcast channel\n");
			return;
		}
		card->broadcast_channel_allocated = true;
	}

	device_for_each_child(card->device, (void *)(long)generation,
			      fw_device_set_broadcast_channel);
}

static const char gap_count_table[] = {
	63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
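
/*
 * Example (sketch): with three 1394a nodes in a daisy chain the maximum hop
 * count is 2, so the bus manager below programs gap_count_table[2] = 7.
 * Topologies deeper than the table covers, or any beta repeaters present,
 * fall back to the maximum gap count of 63.
 */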

void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
	fw_card_get(card);
	if (!schedule_delayed_work(&card->bm_work, delay))
		fw_card_put(card);
}

static void bm_work(struct work_struct *work)
{
	struct fw_card *card = container_of(work, struct fw_card, bm_work.work);
	struct fw_device *root_device, *irm_device;
	struct fw_node *root_node;
	int root_id, new_root_id, irm_id, bm_id, local_id;
	int gap_count, generation, grace, rcode;
	bool do_reset = false;
	bool root_device_is_running;
	bool root_device_is_cmc;
	bool irm_is_1394_1995_only;
	bool keep_this_irm;
	__be32 transaction_data[2];

	spin_lock_irq(&card->lock);

	if (card->local_node == NULL) {
		spin_unlock_irq(&card->lock);
		goto out_put_card;
	}

	generation = card->generation;

	root_node = card->root_node;
	fw_node_get(root_node);
	root_device = root_node->data;
	root_device_is_running = root_device &&
			atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
	root_device_is_cmc = root_device && root_device->cmc;

	irm_device = card->irm_node->data;
	irm_is_1394_1995_only = irm_device && irm_device->config_rom &&
			(irm_device->config_rom[2] & 0x000000f0) == 0;

	/* Canon MV5i works unreliably if it is not root node. */
	keep_this_irm = irm_device && irm_device->config_rom &&
			irm_device->config_rom[3] >> 8 == CANON_OUI;

	root_id  = root_node->node_id;
	irm_id   = card->irm_node->node_id;
	local_id = card->local_node->node_id;

	grace = time_after64(get_jiffies_64(),
			     card->reset_jiffies + DIV_ROUND_UP(HZ, 8));

	if ((is_next_generation(generation, card->bm_generation) &&
	     !card->bm_abdicate) ||
	    (card->bm_generation != generation && grace)) {
		/*
		 * The first step is to figure out who is IRM and
		 * then try to become bus manager.  If the IRM is not
		 * well defined (e.g. does not have an active link
		 * layer or does not respond to our lock request), we
		 * will have to do a little vigilante bus management.
		 * In that case, we do a goto into the gap count logic
		 * so that when we do the reset, we still optimize the
		 * gap count.  That could well save a reset in the
		 * next generation.
		 */

		if (!card->irm_node->link_on) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM has link off", new_root_id);
			goto pick_me;
		}

		if (irm_is_1394_1995_only && !keep_this_irm) {
			new_root_id = local_id;
			fw_notice(card, "%s, making local node (%02x) root\n",
				  "IRM is not 1394a compliant", new_root_id);
			goto pick_me;
		}

		transaction_data[0] = cpu_to_be32(0x3f);
		transaction_data[1] = cpu_to_be32(local_id);

		spin_unlock_irq(&card->lock);

		rcode = fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
				irm_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_BUS_MANAGER_ID,
				transaction_data, 8);

		if (rcode == RCODE_GENERATION)
			/* Another bus reset, BM work has been rescheduled. */
			goto out;

		bm_id = be32_to_cpu(transaction_data[0]);

		spin_lock_irq(&card->lock);
		if (rcode == RCODE_COMPLETE && generation == card->generation)
			card->bm_node_id =
			    bm_id == 0x3f ? local_id : 0xffc0 | bm_id;
		spin_unlock_irq(&card->lock);

		if (rcode == RCODE_COMPLETE && bm_id != 0x3f) {
			/* Somebody else is BM.  Only act as IRM. */
			if (local_id == irm_id)
				allocate_broadcast_channel(card, generation);

			goto out;
		}

		if (rcode == RCODE_SEND_ERROR) {
			/*
			 * We have been unable to send the lock request due to
			 * some local problem.  Let's try again later and hope
			 * that the problem has gone away by then.
			 */
			fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
			goto out;
		}

		spin_lock_irq(&card->lock);

		if (rcode != RCODE_COMPLETE && !keep_this_irm) {
			/*
			 * The lock request failed, maybe the IRM
			 * isn't really IRM capable after all.  Let's
			 * do a bus reset and pick the local node as
			 * root, and thus, IRM.
			 */
			new_root_id = local_id;
			fw_notice(card, "BM lock failed (%s), making local node (%02x) root\n",
				  fw_rcode_string(rcode), new_root_id);
			goto pick_me;
		}
	} else if (card->bm_generation != generation) {
		/*
		 * We weren't BM in the last generation, and the last
		 * bus reset is less than 125ms ago.  Reschedule this job.
		 */
		spin_unlock_irq(&card->lock);
		fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
		goto out;
	}

	/*
	 * We're bus manager for this generation, so next step is to
	 * make sure we have an active cycle master and do gap count
	 * optimization.
	 */
	card->bm_generation = generation;

	if (card->gap_count == 0) {
		/*
		 * If self IDs have inconsistent gap counts, do a
		 * bus reset ASAP.  The config rom read might never
		 * complete, so don't wait for it.  However, still
		 * send a PHY configuration packet prior to the
		 * bus reset.  The PHY configuration packet might
		 * fail, but 1394-2008 8.4.5.2 explicitly permits
		 * it in this case, so it should be safe to try.
		 */
		new_root_id = local_id;
		/*
		 * We must always send a bus reset if the gap count
		 * is inconsistent, so bypass the 5-reset limit.
		 */
		card->bm_retries = 0;
	} else if (root_device == NULL) {
		/*
		 * Either link_on is false, or we failed to read the
		 * config rom.  In either case, pick another root.
		 */
		new_root_id = local_id;
	} else if (!root_device_is_running) {
		/*
		 * If we haven't probed this device yet, bail out now
		 * and let's try again once that's done.
		 */
		spin_unlock_irq(&card->lock);
		goto out;
	} else if (root_device_is_cmc) {
		/*
		 * We will send out a force root packet for this
		 * node as part of the gap count optimization.
		 */
		new_root_id = root_id;
	} else {
		/*
		 * Current root has an active link layer and we
		 * successfully read the config rom, but it's not
		 * cycle master capable.
		 */
		new_root_id = local_id;
	}

 pick_me:
	/*
	 * Pick a gap count from 1394a table E-1.  The table doesn't cover
	 * the typically much larger 1394b beta repeater delays though.
	 */
	if (!card->beta_repeaters_present &&
	    root_node->max_hops < ARRAY_SIZE(gap_count_table))
		gap_count = gap_count_table[root_node->max_hops];
	else
		gap_count = 63;

	/*
	 * Finally, figure out if we should do a reset or not.  If we have
	 * done less than 5 resets with the same physical topology and we
	 * have either a new root or a new gap count setting, let's do it.
	 */

	if (card->bm_retries++ < 5 &&
	    (card->gap_count != gap_count || new_root_id != root_id))
		do_reset = true;

	spin_unlock_irq(&card->lock);

	if (do_reset) {
		fw_notice(card, "phy config: new root=%x, gap_count=%d\n",
			  new_root_id, gap_count);
		fw_send_phy_config(card, new_root_id, generation, gap_count);
		/*
		 * Where possible, use a short bus reset to minimize
		 * disruption to isochronous transfers.  But in the event
		 * of a gap count inconsistency, use a long bus reset.
		 *
		 * As noted in 1394a 8.4.6.2, nodes on a mixed 1394/1394a bus
		 * may set different gap counts after a bus reset.  On a mixed
		 * 1394/1394a bus, a short bus reset can get doubled.  Some
		 * nodes may treat the double reset as one bus reset and others
		 * may treat it as two, causing a gap count inconsistency
		 * again.  Using a long bus reset prevents this.
		 */
		reset_bus(card, card->gap_count != 0);
		/* Will allocate broadcast channel after the reset. */
		goto out;
	}

	if (root_device_is_cmc) {
		/*
		 * Make sure that the cycle master sends cycle start packets.
		 */
		transaction_data[0] = cpu_to_be32(CSR_STATE_BIT_CMSTR);
		rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
				root_id, generation, SCODE_100,
				CSR_REGISTER_BASE + CSR_STATE_SET,
				transaction_data, 4);
		if (rcode == RCODE_GENERATION)
			goto out;
	}

	if (local_id == irm_id)
		allocate_broadcast_channel(card, generation);

 out:
	fw_node_put(root_node);
 out_put_card:
	fw_card_put(card);
}

void fw_card_initialize(struct fw_card *card,
			const struct fw_card_driver *driver,
			struct device *device)
{
	static atomic_t index = ATOMIC_INIT(-1);

	card->index = atomic_inc_return(&index);
	card->driver = driver;
	card->device = device;
	card->current_tlabel = 0;
	card->tlabel_mask = 0;
	card->split_timeout_hi = DEFAULT_SPLIT_TIMEOUT / 8000;
	card->split_timeout_lo = (DEFAULT_SPLIT_TIMEOUT % 8000) << 19;
	card->split_timeout_cycles = DEFAULT_SPLIT_TIMEOUT;
	card->split_timeout_jiffies =
			DIV_ROUND_UP(DEFAULT_SPLIT_TIMEOUT * HZ, 8000);
	card->color = 0;
	card->broadcast_channel = BROADCAST_CHANNEL_INITIAL;

	kref_init(&card->kref);
	init_completion(&card->done);
	INIT_LIST_HEAD(&card->transaction_list);
	INIT_LIST_HEAD(&card->phy_receiver_list);
	spin_lock_init(&card->lock);

	card->local_node = NULL;

	INIT_DELAYED_WORK(&card->br_work, br_work);
	INIT_DELAYED_WORK(&card->bm_work, bm_work);
}
EXPORT_SYMBOL(fw_card_initialize);
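
/*
 * Worked example (sketch) for the split timeout defaults set above: with
 * DEFAULT_SPLIT_TIMEOUT = 2 * 8000 = 16000 cycles (2 s at 8000 cycles/s),
 * split_timeout_hi = 16000 / 8000 = 2 seconds, split_timeout_lo =
 * (16000 % 8000) << 19 = 0 (the sub-second cycle count shifted into the
 * SPLIT_TIMEOUT_LO register layout), and split_timeout_jiffies =
 * DIV_ROUND_UP(16000 * HZ, 8000) = 2 * HZ, the same interval in jiffies.
 */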

int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid)
{
	int ret;

	card->max_receive = max_receive;
	card->link_speed = link_speed;
	card->guid = guid;

	mutex_lock(&card_mutex);

	generate_config_rom(card, tmp_config_rom);
	ret = card->driver->enable(card, tmp_config_rom, config_rom_length);
	if (ret == 0)
		list_add_tail(&card->link, &card_list);

	mutex_unlock(&card_mutex);

	return ret;
}
EXPORT_SYMBOL(fw_card_add);

/*
 * The next few functions implement a dummy driver that is used once a card
 * driver shuts down an fw_card.  This allows the driver to cleanly unload,
 * as all IO to the card will be handled (and failed) by the dummy driver
 * instead of calling into the module.  Only functions for iso context
 * shutdown still need to be provided by the card driver.
 *
 * .read/write_csr() should never be called anymore after the dummy driver
 * was bound since they are only used within request handler context.
 * .set_config_rom() is never called since the card is taken out of card_list
 * before switching to the dummy driver.
 */

static int dummy_read_phy_reg(struct fw_card *card, int address)
{
	return -ENODEV;
}

static int dummy_update_phy_reg(struct fw_card *card, int address,
				int clear_bits, int set_bits)
{
	return -ENODEV;
}

static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
	packet->callback(packet, card, RCODE_CANCELLED);
}

static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	return -ENOENT;
}

static int dummy_enable_phys_dma(struct fw_card *card,
				 int node_id, int generation)
{
	return -ENODEV;
}

static struct fw_iso_context *dummy_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	return ERR_PTR(-ENODEV);
}

static u32 dummy_read_csr(struct fw_card *card, int csr_offset)
{
	return 0;
}

static void dummy_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
}

static int dummy_start_iso(struct fw_iso_context *ctx,
			   s32 cycle, u32 sync, u32 tags)
{
	return -ENODEV;
}

static int dummy_set_iso_channels(struct fw_iso_context *ctx, u64 *channels)
{
	return -ENODEV;
}

static int dummy_queue_iso(struct fw_iso_context *ctx, struct fw_iso_packet *p,
			   struct fw_iso_buffer *buffer, unsigned long payload)
{
	return -ENODEV;
}

static void dummy_flush_queue_iso(struct fw_iso_context *ctx)
{
}

static int dummy_flush_iso_completions(struct fw_iso_context *ctx)
{
	return -ENODEV;
}

static const struct fw_card_driver dummy_driver_template = {
	.read_phy_reg		= dummy_read_phy_reg,
	.update_phy_reg		= dummy_update_phy_reg,
	.send_request		= dummy_send_request,
	.send_response		= dummy_send_response,
	.cancel_packet		= dummy_cancel_packet,
	.enable_phys_dma	= dummy_enable_phys_dma,
	.read_csr		= dummy_read_csr,
	.write_csr		= dummy_write_csr,
	.allocate_iso_context	= dummy_allocate_iso_context,
	.start_iso		= dummy_start_iso,
	.set_iso_channels	= dummy_set_iso_channels,
	.queue_iso		= dummy_queue_iso,
	.flush_queue_iso	= dummy_flush_queue_iso,
	.flush_iso_completions	= dummy_flush_iso_completions,
};

void fw_card_release(struct kref *kref)
{
	struct fw_card *card = container_of(kref, struct fw_card, kref);

	complete(&card->done);
}
EXPORT_SYMBOL_GPL(fw_card_release);

void fw_core_remove_card(struct fw_card *card)
{
	struct fw_card_driver dummy_driver = dummy_driver_template;
	unsigned long flags;

	card->driver->update_phy_reg(card, 4,
				     PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
	fw_schedule_bus_reset(card, false, true);

	mutex_lock(&card_mutex);
	list_del_init(&card->link);
	mutex_unlock(&card_mutex);

	/* Switch off most of the card driver interface. */
	dummy_driver.free_iso_context = card->driver->free_iso_context;
	dummy_driver.stop_iso = card->driver->stop_iso;
	card->driver = &dummy_driver;

	spin_lock_irqsave(&card->lock, flags);
	fw_destroy_nodes(card);
	spin_unlock_irqrestore(&card->lock, flags);

	/* Wait for all users, especially device workqueue jobs, to finish. */
	fw_card_put(card);
	wait_for_completion(&card->done);

	WARN_ON(!list_empty(&card->transaction_list));
}
EXPORT_SYMBOL(fw_core_remove_card);

/**
 * fw_card_read_cycle_time - read the Isochronous Cycle Timer Register of 1394 OHCI in MMIO region
 *			     of the controller card.
 * @card: The instance of card for 1394 OHCI controller.
 * @cycle_time: The mutable reference to the value of cycle time for the read operation.
 *
 * Read the value of the Isochronous Cycle Timer Register of 1394 OHCI in MMIO region for the given
 * controller card. This function accesses the region without any lock primitives or IRQ mask.
 * On success, the content of the @cycle_time argument is aligned to host endianness and formatted
 * per the CYCLE_TIME CSR register of the IEEE 1394 standard.
 *
 * Context: Any context.
 * Return:
 * * 0 - Read successfully.
 * * -ENODEV - The controller is unavailable due to being removed or unbound.
 */
int fw_card_read_cycle_time(struct fw_card *card, u32 *cycle_time)
{
	if (card->driver->read_csr == dummy_read_csr)
		return -ENODEV;

	// It's possible to switch to the dummy driver between the check above and the read below.
	// This is a best effort to return -ENODEV.
	*cycle_time = card->driver->read_csr(card, CSR_CYCLE_TIME);
	return 0;
}
EXPORT_SYMBOL_GPL(fw_card_read_cycle_time);
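
/*
 * Caller sketch (hedged): users of this helper must handle hot removal of the
 * controller, e.g.:
 *
 *	u32 cycle_time;
 *
 *	if (fw_card_read_cycle_time(card, &cycle_time) < 0)
 *		return;		// the card has already been removed
 *	// cycle_time now holds seconds/cycles/offset in CYCLE_TIME layout
 */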