// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) 2004-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
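
/*
 * Illustrative sketch (not in the original file; the concrete values are
 * hypothetical): per the field macros above, the first header quadlet of an
 * outbound request is assembled and later decoded like this.
 *
 *	u32 q0 = HEADER_RETRY(RETRY_X) |			// bits 8-9
 *		 HEADER_TLABEL(0x15) |				// bits 10-15
 *		 HEADER_TCODE(TCODE_READ_QUADLET_REQUEST) |	// bits 4-7
 *		 HEADER_DESTINATION(0xffc0);			// bits 16-31
 *
 *	int tcode  = HEADER_GET_TCODE(q0);	// TCODE_READ_QUADLET_REQUEST
 *	int tlabel = HEADER_GET_TLABEL(q0);	// 0x15
 */
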
#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)
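
/*
 * Illustrative sketch (not in the original file): a PHY configuration packet
 * that forces node 0 to become root and sets a gap count of 5 would carry
 *
 *	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG) |
 *		   PHY_CONFIG_ROOT_ID(0) |
 *		   PHY_CONFIG_GAP_COUNT(5);
 *
 * followed on the wire by its bitwise complement (~data), exactly as
 * fw_send_phy_config() below assembles header[1] and header[2].
 */
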
/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction,
			     struct fw_card *card, int rcode)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter == transaction) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				return -ENOENT;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!t)
		return -ENOENT;

	t->callback(card, rcode, NULL, 0, t->callback_data);

	return 0;
}

/*
 * Only valid for transactions that are potentially pending (ie have
 * been sent).
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */
	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that case.
	 */
	return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);

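/*
 * Usage sketch (not part of the original file; "my_ctx" and its members are
 * hypothetical): a driver that issued an asynchronous request with
 * fw_send_request() can abort it on teardown like this.  The completion
 * callback still runs, with RCODE_CANCELLED, unless the transaction already
 * finished on its own.
 *
 *	if (fw_cancel_transaction(my_ctx->card, &my_ctx->transaction) < 0)
 *		pr_debug("transaction already completed or not pending\n");
 */
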
static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
	    container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE);
		break;
	case ACK_PENDING:
		start_split_transaction_timeout(t, card);
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR);
		break;
	default:
		/*
		 * In this case the ack is really a juju specific
		 * rcode, so just forward that to the callback.
		 */
		close_transaction(t, card, status);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * fw_send_request() - submit a request packet for transmission
 * @card:		interface to send the request at
 * @t:			transaction instance to which the request belongs
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for the request subaction
 * @length:		length of the payload, in bytes
 * @callback:		function to be called when the transaction is completed
 * @callback_data:	data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request is in danger of being sent to the wrong
 * node.
 *
 * In case of asynchronous stream packets i.e. %TCODE_STREAM_DATA, the caller
 * needs to synthesize @destination_id with fw_stream_packet_destination_id().
 * It will contain tag, channel, and sy data instead of a node ID then.
 *
 * The payload buffer at @payload is going to be DMA-mapped except in case of
 * @length <= 8 or of local (loopback) requests.  Hence make sure that the
 * buffer complies with the restrictions of the streaming DMA mapping API.
 * @payload must not be freed before the @callback is called.
 *
 * In case of request types without payload, @payload is NULL and @length is 0.
 *
 * After the transaction is completed successfully or unsuccessfully, the
 * @callback will be called.  Among its parameters is the response code which
 * is either one of the rcodes per IEEE 1394 or, in case of internal errors,
 * the firewire-core specific %RCODE_SEND_ERROR.  The other firewire-core
 * specific rcodes (%RCODE_CANCELLED, %RCODE_BUSY, %RCODE_GENERATION,
 * %RCODE_NO_ACK) denote transaction timeout, busy responder, stale request
 * generation, or missing ACK respectively.
 *
 * Note some timing corner cases:  fw_send_request() may return much earlier
 * than when the request packet actually hits the wire.  On the other hand,
 * transaction completion and hence execution of @callback may happen even
 * before fw_send_request() returns.
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */
	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer,
		    split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);

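/*
 * Usage sketch (not part of the original file; "device", "my_ctx" and
 * my_read_done() are hypothetical): an asynchronous quadlet read of a remote
 * node's CYCLE_TIME register.  A quadlet read request carries no payload, so
 * NULL/0 are passed; the 4-byte response arrives via the callback.  The
 * struct fw_transaction must stay valid until the callback has run.
 *
 *	static void my_read_done(struct fw_card *card, int rcode, void *payload,
 *				 size_t length, void *data)
 *	{
 *		if (rcode == RCODE_COMPLETE)
 *			pr_info("cycle time: %08x\n", be32_to_cpup(payload));
 *		else
 *			pr_info("read failed: %s\n", fw_rcode_string(rcode));
 *	}
 *
 *	fw_send_request(device->card, &my_ctx->t, TCODE_READ_QUADLET_REQUEST,
 *			device->node_id, device->generation, device->max_speed,
 *			CSR_REGISTER_BASE + CSR_CYCLE_TIME, NULL, 0,
 *			my_read_done, my_ctx);
 */
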
struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card:		card interface for this request
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for the request subaction
 * @length:		length of the payload, in bytes
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload points to the payload of the request
 * and/or to the payload of the response.  DMA mapping restrictions apply to
 * outbound request payloads of >= 8 bytes but not to inbound response
 * payloads.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);

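/*
 * Usage sketch (not part of the original file): a blocking Config ROM quadlet
 * read, roughly what firewire-core does during device enumeration.  "device"
 * is assumed to be a struct fw_device the caller already holds.
 *
 *	__be32 quadlet;
 *	int rcode;
 *
 *	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
 *				   device->node_id, device->generation,
 *				   device->max_speed,
 *				   CSR_REGISTER_BASE + CSR_CONFIG_ROM,
 *				   &quadlet, 4);
 *	if (rcode != RCODE_COMPLETE)
 *		return -EIO;
 *	pr_info("bus_info quadlet: %08x\n", be32_to_cpu(quadlet));
 */
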
static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.header[0]	= TCODE_LINK_INTERNAL << 4,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
		data |= PHY_CONFIG_ROOT_ID(node_id);

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	data |= PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler:	callback
 * @region:	region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value:  0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			handler->offset += 0x400;
		else {
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
			if (other != NULL) {
				handler->offset += other->length;
			} else {
				list_add_tail_rcu(&handler->link,
						  &address_handler_list);
				ret = 0;
				break;
			}
		}
	}

	spin_unlock(&address_handler_list_lock);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);

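/*
 * Usage sketch (not part of the original file; the "my_*" names are
 * hypothetical): exposing a 256-byte quadlet-aligned region in unit space
 * and answering every read with zeroes.  The handler is removed again with
 * fw_core_remove_address_handler().
 *
 *	static void my_address_callback(struct fw_card *card,
 *					struct fw_request *request,
 *					int tcode, int destination, int source,
 *					int generation, unsigned long long offset,
 *					void *payload, size_t length, void *data)
 *	{
 *		if (TCODE_IS_READ_REQUEST(tcode)) {
 *			memset(payload, 0, length);
 *			fw_send_response(card, request, RCODE_COMPLETE);
 *		} else {
 *			fw_send_response(card, request, RCODE_TYPE_ERROR);
 *		}
 *	}
 *
 *	static struct fw_address_handler my_handler = {
 *		.length = 0x100,
 *		.address_callback = my_address_callback,
 *	};
 *
 *	err = fw_core_add_address_handler(&my_handler, &fw_unit_space_region);
 *	// on success, my_handler.offset holds the allocated start address
 */
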
/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler:	callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	spin_lock(&address_handler_list_lock);
	list_del_rcu(&handler->link);
	spin_unlock(&address_handler_list_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct kref kref;
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 timestamp;
	u32 length;
	u32 data[];
};

void fw_request_get(struct fw_request *request)
{
	kref_get(&request->kref);
}

static void release_request(struct kref *kref)
{
	struct fw_request *request = container_of(kref, struct fw_request, kref);

	kfree(request);
}

void fw_request_put(struct fw_request *request)
{
	kref_put(&request->kref, release_request);
}

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request = container_of(packet, struct fw_request, response);

	// Decrease the reference count since the response is no longer in flight.
	fw_request_put(request);

	// Decrease the reference count to release the object.
	fw_request_put(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = HEADER_GET_TCODE(r->request_header[0]);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = HEADER_GET_TCODE(request_header[0]);
	tlabel = HEADER_GET_TLABEL(request_header[0]);
	source = HEADER_GET_DESTINATION(request_header[0]);
	destination = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}

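/*
 * Illustrative note (not in the original file): the timestamp packs the three
 * low-order bits of cycleSeconds into bits 15..13 and cycleCount (0..7999)
 * into bits 12..0.  With split_timeout_cycles == 800 (100 ms) and a request
 * received at second 2, cycle 7500, the sum 800 + 7500 = 8300 cycles wraps
 * into second 3, cycle 300, i.e. (3 << 13) | 300.
 */
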
static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;
	kref_init(&request->kref);

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->timestamp = p->timestamp;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

/**
 * fw_send_response() - send response packet for an asynchronous transaction
 * @card:	interface to send the response at.
 * @request:	firewire request data for the transaction.
 * @rcode:	response code to send.
 *
 * Submit a response packet into the asynchronous response transmission queue. The @request
 * is going to be released when the transmission successfully finishes later.
 */
void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		fw_request_put(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data,
				 fw_get_response_length(request));
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	// Increase the reference count so that the object is kept while the response is in flight.
	fw_request_get(request);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

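/*
 * Usage sketch (not part of the original file; "my_dev" and the callback are
 * hypothetical): every struct fw_request handed to an address_callback must
 * be completed exactly once.  A write-only control register could be handled
 * like this:
 *
 *	static void my_write_callback(struct fw_card *card, struct fw_request *request,
 *				      int tcode, int destination, int source,
 *				      int generation, unsigned long long offset,
 *				      void *payload, size_t length, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (tcode != TCODE_WRITE_QUADLET_REQUEST || length != 4) {
 *			fw_send_response(card, request, RCODE_TYPE_ERROR);
 *			return;
 *		}
 *		dev->control = be32_to_cpup(payload);
 *		fw_send_response(card, request, RCODE_COMPLETE);
 *	}
 */
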
/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request:	firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

/**
 * fw_request_get_timestamp() - Get timestamp of the request.
 * @request: The opaque pointer to the request structure.
 *
 * Get the timestamp at which the 1394 OHCI controller received the asynchronous request
 * subaction. The timestamp consists of the low-order 3 bits of the second field and the full
 * 13 bits of the count field of the isochronous cycle time register.
 *
 * Returns: timestamp of the request.
 */
u32 fw_request_get_timestamp(const struct fw_request *request)
{
	return request->timestamp;
}
EXPORT_SYMBOL_GPL(fw_request_get_timestamp);

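/*
 * Illustrative sketch (not in the original file): splitting the returned
 * timestamp back into its cycle-time components.
 *
 *	u32 tstamp = fw_request_get_timestamp(request);
 *	unsigned int sec   = (tstamp >> 13) & 0x7;	// low 3 bits of cycleSeconds
 *	unsigned int cycle = tstamp & 0x1fff;		// cycleCount, 0..7999
 */
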
static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	tcode = HEADER_GET_TCODE(p->header[0]);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);

	rcu_read_lock();
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	if (handler)
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, offset,
					  request->data, request->length,
					  handler->callback_data);
	rcu_read_unlock();

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	tcode = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &address_handler_list, link) {
		if (is_enclosing_handler(handler, offset, request->length))
			handler->address_callback(card, request, tcode,
						  destination, source,
						  p->generation, offset,
						  request->data,
						  request->length,
						  handler->callback_data);
	}
	rcu_read_unlock();

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* FIXME: send statically allocated busy packet. */
		return;
	}

	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
		p->header[2];

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);
}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t = NULL, *iter;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = HEADER_GET_TCODE(p->header[0]);
	tlabel = HEADER_GET_TLABEL(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	rcode = HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iter, &card->transaction_list, link) {
		if (iter->node_id == source && iter->tlabel == tlabel) {
			if (!try_cancel_split_timeout(iter)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&iter->link);
			card->tlabel_mask &= ~(1ULL << iter->tlabel);
			t = iter;
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!t) {
 timed_out:
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * FIXME: sanity check packet, is the length correct, do the tcode
	 * and addresses match?
	 */
	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* Should never happen, this is just to shut up gcc. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

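/*
 * Usage sketch (not part of the original file; node_id, generation, offset
 * and datum are hypothetical): turning the rcode of a completed transaction
 * into a readable log message.
 *
 *	rcode = fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST, node_id,
 *				   generation, SCODE_100, offset, &datum, 4);
 *	if (rcode != RCODE_COMPLETE)
 *		dev_err(card->device, "write failed: %s\n", fw_rcode_string(rcode));
 */
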
static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* minimum per IEEE 1394, maximum which doesn't overflow OHCI */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}

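/*
 * Illustrative note (not in the original file): SPLIT_TIMEOUT_HI holds whole
 * seconds, SPLIT_TIMEOUT_LO holds 1/8000-second cycles in its top 13 bits.
 * With hi = 0 and lo = 0x19000000 (i.e. 800 << 19), the timeout becomes
 * 0 * 8000 + 800 = 800 cycles = 100 ms, which is also the clamped minimum.
 */
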
static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;
	unsigned long flags;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * per IEEE 1394-2008 8.3.22.3, not IEEE 1394.1-2004 3.2.8
		 * and 9.6, but interoperable with IEEE 1394.1-2004 bridges
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_lo =
					be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * FIXME: these are handled by the OHCI hardware and
		 * the stack never sees these requests.  If we add
		 * support for a new type of controller that doesn't
		 * handle this in hardware we need to deal with these
		 * transactions.
		 */
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf () */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf () */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length = ARRAY_SIZE(vendor_textual_descriptor),
	.immediate = 0x03001f11,	/* Vendor ID */
	.key = 0x81000000,
	.data = vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length = ARRAY_SIZE(model_textual_descriptor),
	.immediate = 0x17023901,	/* Model ID */
	.key = 0x81000000,
	.data = model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);