net/nfc/nci/core.c
/*
 * The NFC Controller Interface is the communication protocol between an
 * NFC Controller (NFCC) and a Device Host (DH).
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * Written by Ilan Elias <ilane@ti.com>
 *
 * Acknowledgements:
 * This file is based on hci_core.c, which was written
 * by Maxim Krasnyansky.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/skbuff.h>

#include "../nfc.h"
#include <net/nfc/nci.h>
#include <net/nfc/nci_core.h>
#include <linux/nfc.h>

static void nci_cmd_work(struct work_struct *work);
static void nci_rx_work(struct work_struct *work);
static void nci_tx_work(struct work_struct *work);

/* ---- NCI requests ---- */

void nci_req_complete(struct nci_dev *ndev, int result)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = result;
		ndev->req_status = NCI_REQ_DONE;
		complete(&ndev->req_completion);
	}
}

static void nci_req_cancel(struct nci_dev *ndev, int err)
{
	if (ndev->req_status == NCI_REQ_PEND) {
		ndev->req_result = err;
		ndev->req_status = NCI_REQ_CANCELED;
		complete(&ndev->req_completion);
	}
}

/* Execute request and wait for completion. */
static int __nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt,
	__u32 timeout)
{
	int rc = 0;
	long completion_rc;

	ndev->req_status = NCI_REQ_PEND;

	init_completion(&ndev->req_completion);
	req(ndev, opt);
	completion_rc = wait_for_completion_interruptible_timeout(
							&ndev->req_completion,
							timeout);

	pr_debug("wait_for_completion return %ld\n", completion_rc);

	if (completion_rc > 0) {
		switch (ndev->req_status) {
		case NCI_REQ_DONE:
			rc = nci_to_errno(ndev->req_result);
			break;

		case NCI_REQ_CANCELED:
			rc = -ndev->req_result;
			break;

		default:
			rc = -ETIMEDOUT;
			break;
		}
	} else {
		pr_err("wait_for_completion_interruptible_timeout failed %ld\n",
		       completion_rc);

		rc = ((completion_rc == 0) ? (-ETIMEDOUT) : (completion_rc));
	}

	ndev->req_status = ndev->req_result = 0;

	return rc;
}

static inline int nci_request(struct nci_dev *ndev,
	void (*req)(struct nci_dev *ndev, unsigned long opt),
	unsigned long opt, __u32 timeout)
{
	int rc;

	if (!test_bit(NCI_UP, &ndev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	mutex_lock(&ndev->req_lock);
	rc = __nci_request(ndev, req, opt, timeout);
	mutex_unlock(&ndev->req_lock);

	return rc;
}

static void nci_reset_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_core_reset_cmd cmd;

	cmd.reset_type = NCI_RESET_TYPE_RESET_CONFIG;
	nci_send_cmd(ndev, NCI_OP_CORE_RESET_CMD, 1, &cmd);
}

static void nci_init_req(struct nci_dev *ndev, unsigned long opt)
{
	nci_send_cmd(ndev, NCI_OP_CORE_INIT_CMD, 0, NULL);
}

static void nci_init_complete_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_map_cmd cmd;
	struct disc_map_config *cfg = cmd.mapping_configs;
	__u8 *num = &cmd.num_mapping_configs;
	int i;

	/* set rf mapping configurations */
	*num = 0;

	/* by default mapping is set to NCI_RF_INTERFACE_FRAME */
	for (i = 0; i < ndev->num_supported_rf_interfaces; i++) {
		if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_ISO_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_ISO_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_ISO_DEP;
			(*num)++;
		} else if (ndev->supported_rf_interfaces[i] ==
			NCI_RF_INTERFACE_NFC_DEP) {
			cfg[*num].rf_protocol = NCI_RF_PROTOCOL_NFC_DEP;
			cfg[*num].mode = NCI_DISC_MAP_MODE_BOTH;
			cfg[*num].rf_interface_type = NCI_RF_INTERFACE_NFC_DEP;
			(*num)++;
		}

		if (*num == NCI_MAX_NUM_MAPPING_CONFIGS)
			break;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_MAP_CMD,
		(1 + ((*num) * sizeof(struct disc_map_config))),
		&cmd);
}

static void nci_rf_discover_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_disc_cmd cmd;
	__u32 protocols = opt;

	cmd.num_disc_configs = 0;

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_JEWEL_MASK
	     || protocols & NFC_PROTO_MIFARE_MASK
	     || protocols & NFC_PROTO_ISO14443_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_A_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_ISO14443_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_B_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	if ((cmd.num_disc_configs < NCI_MAX_NUM_RF_CONFIGS) &&
	    (protocols & NFC_PROTO_FELICA_MASK
	     || protocols & NFC_PROTO_NFC_DEP_MASK)) {
		cmd.disc_configs[cmd.num_disc_configs].type =
			NCI_DISCOVERY_TYPE_POLL_F_PASSIVE;
		cmd.disc_configs[cmd.num_disc_configs].frequency = 1;
		cmd.num_disc_configs++;
	}

	nci_send_cmd(ndev, NCI_OP_RF_DISCOVER_CMD,
		(1 + (cmd.num_disc_configs * sizeof(struct disc_config))),
		&cmd);
}

static void nci_rf_deactivate_req(struct nci_dev *ndev, unsigned long opt)
{
	struct nci_rf_deactivate_cmd cmd;

	cmd.type = NCI_DEACTIVATE_TYPE_IDLE_MODE;

	nci_send_cmd(ndev, NCI_OP_RF_DEACTIVATE_CMD,
		sizeof(struct nci_rf_deactivate_cmd),
		&cmd);
}

static int nci_open_device(struct nci_dev *ndev)
{
	int rc = 0;

	mutex_lock(&ndev->req_lock);

	if (test_bit(NCI_UP, &ndev->flags)) {
		rc = -EALREADY;
		goto done;
	}

	if (ndev->ops->open(ndev)) {
		rc = -EIO;
		goto done;
	}

	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);

	rc = __nci_request(ndev, nci_reset_req, 0,
		msecs_to_jiffies(NCI_RESET_TIMEOUT));

	if (!rc) {
		rc = __nci_request(ndev, nci_init_req, 0,
			msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	if (!rc) {
		rc = __nci_request(ndev, nci_init_complete_req, 0,
			msecs_to_jiffies(NCI_INIT_TIMEOUT));
	}

	clear_bit(NCI_INIT, &ndev->flags);

	if (!rc) {
		set_bit(NCI_UP, &ndev->flags);
	} else {
		/* Init failed, cleanup */
		skb_queue_purge(&ndev->cmd_q);
		skb_queue_purge(&ndev->rx_q);
		skb_queue_purge(&ndev->tx_q);

		ndev->ops->close(ndev);
		ndev->flags = 0;
	}

done:
	mutex_unlock(&ndev->req_lock);
	return rc;
}

static int nci_close_device(struct nci_dev *ndev)
{
	nci_req_cancel(ndev, ENODEV);
	mutex_lock(&ndev->req_lock);

	if (!test_and_clear_bit(NCI_UP, &ndev->flags)) {
		del_timer_sync(&ndev->cmd_timer);
		mutex_unlock(&ndev->req_lock);
		return 0;
	}

	/* Drop RX and TX queues */
	skb_queue_purge(&ndev->rx_q);
	skb_queue_purge(&ndev->tx_q);

	/* Flush RX and TX wq */
	flush_workqueue(ndev->rx_wq);
	flush_workqueue(ndev->tx_wq);

	/* Reset device */
	skb_queue_purge(&ndev->cmd_q);
	atomic_set(&ndev->cmd_cnt, 1);

	set_bit(NCI_INIT, &ndev->flags);
	__nci_request(ndev, nci_reset_req, 0,
		msecs_to_jiffies(NCI_RESET_TIMEOUT));
	clear_bit(NCI_INIT, &ndev->flags);

	/* Flush cmd wq */
	flush_workqueue(ndev->cmd_wq);

	/* After this point our queues are empty
	 * and no work is scheduled. */
	ndev->ops->close(ndev);

	/* Clear flags */
	ndev->flags = 0;

	mutex_unlock(&ndev->req_lock);

	return 0;
}

/* NCI command timer function */
static void nci_cmd_timer(unsigned long arg)
{
	struct nci_dev *ndev = (void *) arg;

	atomic_set(&ndev->cmd_cnt, 1);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);
}

static int nci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_open_device(ndev);
}

static int nci_dev_down(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	return nci_close_device(ndev);
}

static int nci_start_poll(struct nfc_dev *nfc_dev, __u32 protocols)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	if (test_bit(NCI_DISCOVERY, &ndev->flags)) {
		pr_err("unable to start poll, since poll is already active\n");
		return -EBUSY;
	}

	if (ndev->target_active_prot) {
		pr_err("there is an active target\n");
		return -EBUSY;
	}

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		pr_debug("target is active, implicitly deactivate...\n");

		rc = nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
		if (rc)
			return -EBUSY;
	}

	rc = nci_request(ndev, nci_rf_discover_req, protocols,
		msecs_to_jiffies(NCI_RF_DISC_TIMEOUT));

	if (!rc)
		ndev->poll_prots = protocols;

	return rc;
}

static void nci_stop_poll(struct nfc_dev *nfc_dev)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	if (!test_bit(NCI_DISCOVERY, &ndev->flags)) {
		pr_err("unable to stop poll, since poll is not active\n");
		return;
	}

	nci_request(ndev, nci_rf_deactivate_req, 0,
		msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
}

static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
				__u32 protocol)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d, protocol 0x%x\n", target_idx, protocol);

	if (!test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		pr_err("there is no available target to activate\n");
		return -EINVAL;
	}

	if (ndev->target_active_prot) {
		pr_err("there is already an active target\n");
		return -EBUSY;
	}

	if (!(ndev->target_available_prots & (1 << protocol))) {
		pr_err("target does not support the requested protocol 0x%x\n",
		       protocol);
		return -EINVAL;
	}

	ndev->target_active_prot = protocol;
	ndev->target_available_prots = 0;

	return 0;
}

static void nci_deactivate_target(struct nfc_dev *nfc_dev, __u32 target_idx)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);

	pr_debug("target_idx %d\n", target_idx);

	if (!ndev->target_active_prot) {
		pr_err("unable to deactivate target, no active target\n");
		return;
	}

	ndev->target_active_prot = 0;

	if (test_bit(NCI_POLL_ACTIVE, &ndev->flags)) {
		nci_request(ndev, nci_rf_deactivate_req, 0,
			msecs_to_jiffies(NCI_RF_DEACTIVATE_TIMEOUT));
	}
}

static int nci_data_exchange(struct nfc_dev *nfc_dev, __u32 target_idx,
						struct sk_buff *skb,
						data_exchange_cb_t cb,
						void *cb_context)
{
	struct nci_dev *ndev = nfc_get_drvdata(nfc_dev);
	int rc;

	pr_debug("target_idx %d, len %d\n", target_idx, skb->len);

	if (!ndev->target_active_prot) {
		pr_err("unable to exchange data, no active target\n");
		return -EINVAL;
	}

	if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
		return -EBUSY;

	/* store cb and context to be used on receiving data */
	ndev->data_exchange_cb = cb;
	ndev->data_exchange_cb_context = cb_context;

	rc = nci_send_data(ndev, NCI_STATIC_RF_CONN_ID, skb);
	if (rc)
		clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);

	return rc;
}

static struct nfc_ops nci_nfc_ops = {
	.dev_up = nci_dev_up,
	.dev_down = nci_dev_down,
	.start_poll = nci_start_poll,
	.stop_poll = nci_stop_poll,
	.activate_target = nci_activate_target,
	.deactivate_target = nci_deactivate_target,
	.data_exchange = nci_data_exchange,
};

/* ---- Interface to NCI drivers ---- */

/**
 * nci_allocate_device - allocate a new nci device
 *
 * @ops: device operations
 * @supported_protocols: NFC protocols supported by the device
 * @tx_headroom: reserved headroom the driver needs in tx skbs
 * @tx_tailroom: reserved tailroom the driver needs in tx skbs
 */
struct nci_dev *nci_allocate_device(struct nci_ops *ops,
					__u32 supported_protocols,
					int tx_headroom,
					int tx_tailroom)
{
	struct nci_dev *ndev;

	pr_debug("supported_protocols 0x%x\n", supported_protocols);

	if (!ops->open || !ops->close || !ops->send)
		return NULL;

	if (!supported_protocols)
		return NULL;

	ndev = kzalloc(sizeof(struct nci_dev), GFP_KERNEL);
	if (!ndev)
		return NULL;

	ndev->ops = ops;
	ndev->tx_headroom = tx_headroom;
	ndev->tx_tailroom = tx_tailroom;

	ndev->nfc_dev = nfc_allocate_device(&nci_nfc_ops,
						supported_protocols,
						tx_headroom + NCI_DATA_HDR_SIZE,
						tx_tailroom);
	if (!ndev->nfc_dev)
		goto free_exit;

	nfc_set_drvdata(ndev->nfc_dev, ndev);

	return ndev;

free_exit:
	kfree(ndev);
	return NULL;
}
EXPORT_SYMBOL(nci_allocate_device);

/**
 * nci_free_device - deallocate nci device
 *
 * @ndev: The nci device to deallocate
 */
void nci_free_device(struct nci_dev *ndev)
{
	nfc_free_device(ndev->nfc_dev);
	kfree(ndev);
}
EXPORT_SYMBOL(nci_free_device);

/**
 * nci_register_device - register an NCI device in the nfc subsystem
 *
 * @ndev: The nci device to register
 */
int nci_register_device(struct nci_dev *ndev)
{
	int rc;
	struct device *dev = &ndev->nfc_dev->dev;
	char name[32];

	rc = nfc_register_device(ndev->nfc_dev);
	if (rc)
		goto exit;

	ndev->flags = 0;

	INIT_WORK(&ndev->cmd_work, nci_cmd_work);
	snprintf(name, sizeof(name), "%s_nci_cmd_wq", dev_name(dev));
	ndev->cmd_wq = create_singlethread_workqueue(name);
	if (!ndev->cmd_wq) {
		rc = -ENOMEM;
		goto unreg_exit;
	}

	INIT_WORK(&ndev->rx_work, nci_rx_work);
	snprintf(name, sizeof(name), "%s_nci_rx_wq", dev_name(dev));
	ndev->rx_wq = create_singlethread_workqueue(name);
	if (!ndev->rx_wq) {
		rc = -ENOMEM;
		goto destroy_cmd_wq_exit;
	}

	INIT_WORK(&ndev->tx_work, nci_tx_work);
	snprintf(name, sizeof(name), "%s_nci_tx_wq", dev_name(dev));
	ndev->tx_wq = create_singlethread_workqueue(name);
	if (!ndev->tx_wq) {
		rc = -ENOMEM;
		goto destroy_rx_wq_exit;
	}

	skb_queue_head_init(&ndev->cmd_q);
	skb_queue_head_init(&ndev->rx_q);
	skb_queue_head_init(&ndev->tx_q);

	setup_timer(&ndev->cmd_timer, nci_cmd_timer,
			(unsigned long) ndev);

	mutex_init(&ndev->req_lock);

	goto exit;

destroy_rx_wq_exit:
	destroy_workqueue(ndev->rx_wq);

destroy_cmd_wq_exit:
	destroy_workqueue(ndev->cmd_wq);

unreg_exit:
	nfc_unregister_device(ndev->nfc_dev);

exit:
	return rc;
}
EXPORT_SYMBOL(nci_register_device);
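
/*
 * Illustrative sketch (not part of this file): how a driver is expected
 * to wire itself into the NCI core at probe time.  The "acme_*" names
 * and ACME_TX_* constants are hypothetical; a real driver supplies its
 * own nci_ops backed by its transport (I2C, SPI, UART, ...).
 *
 *	static struct nci_ops acme_nci_ops = {
 *		.open  = acme_nci_open,
 *		.close = acme_nci_close,
 *		.send  = acme_nci_send,
 *	};
 *
 *	static int acme_probe(struct acme_dev *adev)
 *	{
 *		__u32 protocols = NFC_PROTO_MIFARE_MASK |
 *				  NFC_PROTO_ISO14443_MASK;
 *
 *		adev->ndev = nci_allocate_device(&acme_nci_ops, protocols,
 *						 ACME_TX_HEADROOM,
 *						 ACME_TX_TAILROOM);
 *		if (!adev->ndev)
 *			return -ENOMEM;
 *
 *		return nci_register_device(adev->ndev);
 *	}
 */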

/**
 * nci_unregister_device - unregister an NCI device in the nfc subsystem
 *
 * @ndev: The nci device to unregister
 */
void nci_unregister_device(struct nci_dev *ndev)
{
	nci_close_device(ndev);

	destroy_workqueue(ndev->cmd_wq);
	destroy_workqueue(ndev->rx_wq);
	destroy_workqueue(ndev->tx_wq);

	nfc_unregister_device(ndev->nfc_dev);
}
EXPORT_SYMBOL(nci_unregister_device);
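
/*
 * Illustrative sketch (hypothetical "acme" driver, continued): teardown
 * mirrors the probe path shown above - unregister first, then free.
 *
 *	static void acme_remove(struct acme_dev *adev)
 *	{
 *		nci_unregister_device(adev->ndev);
 *		nci_free_device(adev->ndev);
 *	}
 */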

/**
 * nci_recv_frame - receive frame from NCI drivers
 *
 * @skb: The sk_buff to receive
 */
int nci_recv_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev || (!test_bit(NCI_UP, &ndev->flags)
		&& !test_bit(NCI_INIT, &ndev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Queue frame for rx worker thread */
	skb_queue_tail(&ndev->rx_q, skb);
	queue_work(ndev->rx_wq, &ndev->rx_work);

	return 0;
}
EXPORT_SYMBOL(nci_recv_frame);
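
/*
 * Illustrative sketch (hypothetical "acme" driver): feeding a received
 * NCI frame into the core.  The driver copies the raw frame into an skb
 * allocated with nci_skb_alloc(), points skb->dev at its struct nci_dev
 * and hands it to nci_recv_frame(); parsing then happens in nci_rx_work().
 *
 *	static void acme_rx(struct nci_dev *ndev, const __u8 *buf, size_t len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = nci_skb_alloc(ndev, len, GFP_KERNEL);
 *		if (!skb)
 *			return;
 *
 *		memcpy(skb_put(skb, len), buf, len);
 *		skb->dev = (void *) ndev;
 *		nci_recv_frame(skb);
 *	}
 */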

static int nci_send_frame(struct sk_buff *skb)
{
	struct nci_dev *ndev = (struct nci_dev *) skb->dev;

	pr_debug("len %d\n", skb->len);

	if (!ndev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return ndev->ops->send(skb);
}

/* Send NCI command */
int nci_send_cmd(struct nci_dev *ndev, __u16 opcode, __u8 plen, void *payload)
{
	struct nci_ctrl_hdr *hdr;
	struct sk_buff *skb;

	pr_debug("opcode 0x%x, plen %d\n", opcode, plen);

	skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + plen), GFP_KERNEL);
	if (!skb) {
		pr_err("no memory for command\n");
		return -ENOMEM;
	}

	hdr = (struct nci_ctrl_hdr *) skb_put(skb, NCI_CTRL_HDR_SIZE);
	hdr->gid = nci_opcode_gid(opcode);
	hdr->oid = nci_opcode_oid(opcode);
	hdr->plen = plen;

	nci_mt_set((__u8 *)hdr, NCI_MT_CMD_PKT);
	nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST);

	if (plen)
		memcpy(skb_put(skb, plen), payload, plen);

	skb->dev = (void *) ndev;

	skb_queue_tail(&ndev->cmd_q, skb);
	queue_work(ndev->cmd_wq, &ndev->cmd_work);

	return 0;
}

/* ---- NCI TX Data worker thread ---- */

static void nci_tx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, tx_work);
	struct sk_buff *skb;

	pr_debug("credits_cnt %d\n", atomic_read(&ndev->credits_cnt));

	/* Send queued tx data */
	while (atomic_read(&ndev->credits_cnt)) {
		skb = skb_dequeue(&ndev->tx_q);
		if (!skb)
			return;

		/* Check if data flow control is used */
		if (atomic_read(&ndev->credits_cnt) !=
				NCI_DATA_FLOW_CONTROL_NOT_USED)
			atomic_dec(&ndev->credits_cnt);

		pr_debug("NCI TX: MT=data, PBF=%d, conn_id=%d, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_conn_id(skb->data),
			 nci_plen(skb->data));

		nci_send_frame(skb);
	}
}

/* ----- NCI RX worker thread (data & control) ----- */

static void nci_rx_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, rx_work);
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ndev->rx_q))) {
		/* Process frame */
		switch (nci_mt(skb->data)) {
		case NCI_MT_RSP_PKT:
			nci_rsp_packet(ndev, skb);
			break;

		case NCI_MT_NTF_PKT:
			nci_ntf_packet(ndev, skb);
			break;

		case NCI_MT_DATA_PKT:
			nci_rx_data_packet(ndev, skb);
			break;

		default:
			pr_err("unknown MT 0x%x\n", nci_mt(skb->data));
			kfree_skb(skb);
			break;
		}
	}
}

/* ----- NCI TX CMD worker thread ----- */

static void nci_cmd_work(struct work_struct *work)
{
	struct nci_dev *ndev = container_of(work, struct nci_dev, cmd_work);
	struct sk_buff *skb;

	pr_debug("cmd_cnt %d\n", atomic_read(&ndev->cmd_cnt));

	/* Send queued command */
	if (atomic_read(&ndev->cmd_cnt)) {
		skb = skb_dequeue(&ndev->cmd_q);
		if (!skb)
			return;

		atomic_dec(&ndev->cmd_cnt);

		pr_debug("NCI TX: MT=cmd, PBF=%d, GID=0x%x, OID=0x%x, plen=%d\n",
			 nci_pbf(skb->data),
			 nci_opcode_gid(nci_opcode(skb->data)),
			 nci_opcode_oid(nci_opcode(skb->data)),
			 nci_plen(skb->data));

		nci_send_frame(skb);

		mod_timer(&ndev->cmd_timer,
			jiffies + msecs_to_jiffies(NCI_CMD_TIMEOUT));
	}
}