/*
 * drivers/net/wireless/ath/ath6kl/htc_pipe.c
 */
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19#include "hif-ops.h"
20
21#define HTC_PACKET_CONTAINER_ALLOCATION 32
22#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
23
24static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25 struct htc_packet *packet);
26static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
27
28/* htc pipe tx path */
29static inline void restore_tx_packet(struct htc_packet *packet)
30{
31 if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32 skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33 packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
34 }
35}
36
37static void do_send_completion(struct htc_endpoint *ep,
38 struct list_head *queue_to_indicate)
39{
40 struct htc_packet *packet;
41
42 if (list_empty(queue_to_indicate)) {
43 /* nothing to indicate */
44 return;
45 }
46
47 if (ep->ep_cb.tx_comp_multi != NULL) {
48 ath6kl_dbg(ATH6KL_DBG_HTC,
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
50 __func__, ep->eid,
51 get_queue_depth(queue_to_indicate));
52 /*
53 * a multiple send complete handler is being used,
54 * pass the queue to the handler
55 */
56 ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
57 /*
58 * all packets are now owned by the callback,
59 * reset queue to be safe
60 */
61 INIT_LIST_HEAD(queue_to_indicate);
62 } else {
63 /* using legacy EpTxComplete */
64 do {
65 packet = list_first_entry(queue_to_indicate,
66 struct htc_packet, list);
67
68 list_del(&packet->list);
69 ath6kl_dbg(ATH6KL_DBG_HTC,
70 "%s: calling ep %d send complete callback on packet 0x%p\n",
71 __func__, ep->eid, packet);
72 ep->ep_cb.tx_complete(ep->target, packet);
73 } while (!list_empty(queue_to_indicate));
74 }
75}
76
77static void send_packet_completion(struct htc_target *target,
78 struct htc_packet *packet)
79{
80 struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81 struct list_head container;
82
83 restore_tx_packet(packet);
84 INIT_LIST_HEAD(&container);
85 list_add_tail(&packet->list, &container);
86
87 /* do completion */
88 do_send_completion(ep, &container);
89}
90
/*
 * Move as many packets as current credits allow from the endpoint TX
 * queue onto the caller supplied @queue, charging each packet the
 * number of target credits its transfer consumes and stamping it with
 * send flags and a sequence number.
 *
 * NOTE : the TX lock is held when this function is called.
 */
static void get_htc_packet_credit_based(struct htc_target *target,
					struct htc_endpoint *ep,
					struct list_head *queue)
{
	int credits_required;
	int remainder;
	u8 send_flags;
	struct htc_packet *packet;
	unsigned int transfer_len;

	/* loop until we can grab as many packets out of the queue as we can */
	while (true) {
		send_flags = 0;
		if (list_empty(&ep->txq))
			break;

		/* get packet at head, but don't remove it */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got head packet:0x%p , queue depth: %d\n",
			   __func__, packet, get_queue_depth(&ep->txq));

		/* the HTC frame header travels with the payload */
		transfer_len = packet->act_len + HTC_HDR_LENGTH;

		if (transfer_len <= target->tgt_cred_sz) {
			credits_required = 1;
		} else {
			/* figure out how many credits this message requires */
			credits_required = transfer_len / target->tgt_cred_sz;
			remainder = transfer_len % target->tgt_cred_sz;

			/* a partial credit-sized chunk still costs a credit */
			if (remainder)
				credits_required++;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
			   __func__, credits_required, ep->cred_dist.credits);

		if (ep->eid == ENDPOINT_0) {
			/*
			 * endpoint 0 is special, it always has a credit and
			 * does not require credit based flow control
			 */
			credits_required = 0;

		} else {
			/* not enough credits: leave packet on the TX queue */
			if (ep->cred_dist.credits < credits_required)
				break;

			/* (cred_cosumd is the upstream field spelling) */
			ep->cred_dist.credits -= credits_required;
			ep->ep_st.cred_cosumd += credits_required;

			/* check if we need credits back from the target */
			if (ep->cred_dist.credits <
			    ep->cred_dist.cred_per_msg) {
				/* tell the target we need credits ASAP! */
				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
				ep->ep_st.cred_low_indicate += 1;
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: host needs credits\n",
					   __func__);
			}
		}

		/* now we can fully dequeue */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);

		list_del(&packet->list);
		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = credits_required;
		/* save send flags */
		packet->info.tx.flags = send_flags;
		packet->info.tx.seqno = ep->seqno;
		ep->seqno++;
		/* queue this packet into the caller's queue */
		list_add_tail(&packet->list, queue);
	}
}
172
173static void get_htc_packet(struct htc_target *target,
174 struct htc_endpoint *ep,
175 struct list_head *queue, int resources)
176{
177 struct htc_packet *packet;
178
179 /* NOTE : the TX lock is held when this function is called */
180
181 /* loop until we can grab as many packets out of the queue as we can */
182 while (resources) {
183 if (list_empty(&ep->txq))
184 break;
185
186 packet = list_first_entry(&ep->txq, struct htc_packet, list);
187 list_del(&packet->list);
188
189 ath6kl_dbg(ATH6KL_DBG_HTC,
190 "%s: got packet:0x%p , new queue depth: %d\n",
191 __func__, packet, get_queue_depth(&ep->txq));
192 packet->info.tx.seqno = ep->seqno;
193 packet->info.tx.flags = 0;
194 packet->info.tx.cred_used = 0;
195 ep->seqno++;
196
197 /* queue this packet into the caller's queue */
198 list_add_tail(&packet->list, queue);
199 resources--;
200 }
201}
202
203static int htc_issue_packets(struct htc_target *target,
204 struct htc_endpoint *ep,
205 struct list_head *pkt_queue)
206{
207 int status = 0;
208 u16 payload_len;
209 struct sk_buff *skb;
210 struct htc_frame_hdr *htc_hdr;
211 struct htc_packet *packet;
212
213 ath6kl_dbg(ATH6KL_DBG_HTC,
214 "%s: queue: 0x%p, pkts %d\n", __func__,
215 pkt_queue, get_queue_depth(pkt_queue));
216
217 while (!list_empty(pkt_queue)) {
218 packet = list_first_entry(pkt_queue, struct htc_packet, list);
219 list_del(&packet->list);
220
221 skb = packet->skb;
222 if (!skb) {
223 WARN_ON_ONCE(1);
224 status = -EINVAL;
225 break;
226 }
227
228 payload_len = packet->act_len;
229
230 /* setup HTC frame header */
d58ff351 231 htc_hdr = skb_push(skb, sizeof(*htc_hdr));
636f8288
KV
232 if (!htc_hdr) {
233 WARN_ON_ONCE(1);
234 status = -EINVAL;
235 break;
236 }
237
238 packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
239
240 /* Endianess? */
241 put_unaligned((u16) payload_len, &htc_hdr->payld_len);
242 htc_hdr->flags = packet->info.tx.flags;
243 htc_hdr->eid = (u8) packet->endpoint;
244 htc_hdr->ctrl[0] = 0;
245 htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
246
247 spin_lock_bh(&target->tx_lock);
248
249 /* store in look up queue to match completions */
250 list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
251 ep->ep_st.tx_issued += 1;
252 spin_unlock_bh(&target->tx_lock);
253
254 status = ath6kl_hif_pipe_send(target->dev->ar,
255 ep->pipe.pipeid_ul, NULL, skb);
256
257 if (status != 0) {
258 if (status != -ENOMEM) {
259 /* TODO: if more than 1 endpoint maps to the
260 * same PipeID, it is possible to run out of
261 * resources in the HIF layer.
262 * Don't emit the error
263 */
264 ath6kl_dbg(ATH6KL_DBG_HTC,
265 "%s: failed status:%d\n",
266 __func__, status);
267 }
268 spin_lock_bh(&target->tx_lock);
269 list_del(&packet->list);
270
271 /* reclaim credits */
272 ep->cred_dist.credits += packet->info.tx.cred_used;
273 spin_unlock_bh(&target->tx_lock);
274
275 /* put it back into the callers queue */
276 list_add(&packet->list, pkt_queue);
277 break;
278 }
636f8288
KV
279 }
280
281 if (status != 0) {
282 while (!list_empty(pkt_queue)) {
283 if (status != -ENOMEM) {
284 ath6kl_dbg(ATH6KL_DBG_HTC,
285 "%s: failed pkt:0x%p status:%d\n",
286 __func__, packet, status);
287 }
288
289 packet = list_first_entry(pkt_queue,
290 struct htc_packet, list);
291 list_del(&packet->list);
292 packet->status = status;
293 send_packet_completion(target, packet);
294 }
295 }
296
297 return status;
298}
299
/*
 * Queue the packets in @txq (NULL means "just service the endpoint's
 * queues") onto the endpoint TX queue, honouring max_txq_depth and the
 * optional send-full callback, then drain the endpoint queue while
 * transmit resources (credits or HIF queue slots) remain.  The
 * tx_proc_cnt counter guarantees only one thread drains an endpoint at
 * a time; later entrants just enqueue and return.
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				/* move to local queue */
				list_move_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * move from caller's queue to the send
					 * queue */
					list_move_tail(&packet->list,
						       &send_queue);
				}
			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/* without credit flow, resources come from the HIF free queue */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
		    ath6kl_hif_pipe_get_free_queue_number(ar,
							  ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		if (!list_empty(&send_queue)) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;

	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {
		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop the lock while the packets are handed to the HIF */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);
	}

	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
499
500/* htc control packet manipulation */
501static void destroy_htc_txctrl_packet(struct htc_packet *packet)
502{
503 struct sk_buff *skb;
504 skb = packet->skb;
e16ccfee 505 dev_kfree_skb(skb);
636f8288
KV
506 kfree(packet);
507}
508
509static struct htc_packet *build_htc_txctrl_packet(void)
510{
511 struct htc_packet *packet = NULL;
512 struct sk_buff *skb;
513
514 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
515 if (packet == NULL)
516 return NULL;
517
518 skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
519
520 if (skb == NULL) {
521 kfree(packet);
522 return NULL;
523 }
524 packet->skb = skb;
525
526 return packet;
527}
528
/* release a TX control packet (@target unused, kept for symmetry) */
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	destroy_htc_txctrl_packet(packet);
}
534
/* allocate a TX control packet (@target unused, kept for symmetry) */
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	return build_htc_txctrl_packet();
}
539
/* TX-complete callback for control endpoint packets: just free them */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
545
546#define MAX_MESSAGE_SIZE 1536
547
/*
 * Distribute the target's total TX credit pool across the WMI data and
 * control services by filling target->pipe.txcredit_alloc[].  Entries
 * are consumed in order, each service taking its slice of @credits;
 * whatever remains goes to best effort.  Returns 0 on success, -ENOMEM
 * when the pool runs dry before all mandatory services are funded.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* credits needed to carry one maximum-sized message */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */
	if (hif_usbaudioclass) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		/* video gets a quarter of the pool, minimum one credit */
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* voice gets a quarter of what remains, minimum one */
		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* control and background each get one max-message worth */
		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		/* log the final per-service allocation table */
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}
	return status;
}
652
/*
 * Process a credit report received from the target: credit each listed
 * endpoint and, if it now has both credits and queued packets, kick its
 * TX path.  The TX lock is dropped around htc_try_send() since that
 * path takes the lock itself.
 */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			/* malformed report entry: abort processing */
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		ep->cred_dist.credits += rpt->credits;

		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
			/* endpoint has work pending: try to send now */
			spin_unlock_bh(&target->tx_lock);
			htc_try_send(target, ep, NULL);
			spin_lock_bh(&target->tx_lock);
		}

		total_credits += rpt->credits;
	}
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Report indicated %d credits to distribute\n",
		   total_credits);

	spin_unlock_bh(&target->tx_lock);
}
689
690/* flush endpoint TX queue */
691static void htc_flush_tx_endpoint(struct htc_target *target,
692 struct htc_endpoint *ep, u16 tag)
693{
694 struct htc_packet *packet;
695
696 spin_lock_bh(&target->tx_lock);
697 while (get_queue_depth(&ep->txq)) {
698 packet = list_first_entry(&ep->txq, struct htc_packet, list);
699 list_del(&packet->list);
700 packet->status = 0;
701 send_packet_completion(target, packet);
702 }
703 spin_unlock_bh(&target->tx_lock);
704}
705
706/*
707 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
708 * since upper layers expects struct htc_packet containers we use the completed
709 * skb and lookup it's corresponding HTC packet buffer from a lookup list.
710 * This is extra overhead that can be fixed by re-aligning HIF interfaces with
711 * HTC.
712 */
713static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
714 struct htc_endpoint *ep,
715 struct sk_buff *skb)
716{
717 struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
718
719 spin_lock_bh(&target->tx_lock);
720
721 /*
722 * interate from the front of tx lookup queue
723 * this lookup should be fast since lower layers completes in-order and
724 * so the completed packet should be at the head of the list generally
725 */
726 list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
727 list) {
728 /* check for removal */
729 if (skb == packet->skb) {
730 /* found it */
731 list_del(&packet->list);
732 found_packet = packet;
733 break;
734 }
735 }
736
737 spin_unlock_bh(&target->tx_lock);
738
739 return found_packet;
740}
741
742static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
743{
744 struct htc_target *target = ar->htc_target;
745 struct htc_frame_hdr *htc_hdr;
746 struct htc_endpoint *ep;
747 struct htc_packet *packet;
748 u8 ep_id, *netdata;
749 u32 netlen;
750
751 netdata = skb->data;
752 netlen = skb->len;
753
754 htc_hdr = (struct htc_frame_hdr *) netdata;
755
756 ep_id = htc_hdr->eid;
757 ep = &target->endpoint[ep_id];
758
759 packet = htc_lookup_tx_packet(target, ep, skb);
760 if (packet == NULL) {
761 /* may have already been flushed and freed */
762 ath6kl_err("HTC TX lookup failed!\n");
763 } else {
764 /* will be giving this buffer back to upper layers */
765 packet->status = 0;
766 send_packet_completion(target, packet);
767 }
768 skb = NULL;
769
770 if (!ep->pipe.tx_credit_flow_enabled) {
771 /*
772 * note: when using TX credit flow, the re-checking of queues
773 * happens when credits flow back from the target. in the
774 * non-TX credit case, we recheck after the packet completes
775 */
776 htc_try_send(target, ep, NULL);
777 }
778
779 return 0;
780}
781
782static int htc_send_packets_multiple(struct htc_target *target,
783 struct list_head *pkt_queue)
784{
785 struct htc_endpoint *ep;
786 struct htc_packet *packet, *tmp_pkt;
787
788 if (list_empty(pkt_queue))
789 return -EINVAL;
790
791 /* get first packet to find out which ep the packets will go into */
792 packet = list_first_entry(pkt_queue, struct htc_packet, list);
636f8288
KV
793
794 if (packet->endpoint >= ENDPOINT_MAX) {
795 WARN_ON_ONCE(1);
796 return -EINVAL;
797 }
798 ep = &target->endpoint[packet->endpoint];
799
800 htc_try_send(target, ep, pkt_queue);
801
802 /* do completion on any packets that couldn't get in */
803 if (!list_empty(pkt_queue)) {
804 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
805 packet->status = -ENOMEM;
806 }
807
808 do_send_completion(ep, pkt_queue);
809 }
810
811 return 0;
812}
813
814/* htc pipe rx path */
815static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
816{
817 struct htc_packet *packet;
818 spin_lock_bh(&target->rx_lock);
819
820 if (target->pipe.htc_packet_pool == NULL) {
821 spin_unlock_bh(&target->rx_lock);
822 return NULL;
823 }
824
825 packet = target->pipe.htc_packet_pool;
826 target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
827
828 spin_unlock_bh(&target->rx_lock);
829
830 packet->list.next = NULL;
831 return packet;
832}
833
834static void free_htc_packet_container(struct htc_target *target,
835 struct htc_packet *packet)
836{
837 struct list_head *lh;
838
839 spin_lock_bh(&target->rx_lock);
840
841 if (target->pipe.htc_packet_pool == NULL) {
842 target->pipe.htc_packet_pool = packet;
843 packet->list.next = NULL;
844 } else {
845 lh = (struct list_head *) target->pipe.htc_packet_pool;
846 packet->list.next = lh;
847 target->pipe.htc_packet_pool = packet;
848 }
849
850 spin_unlock_bh(&target->rx_lock);
851}
852
/*
 * Parse the trailer records appended to a received HTC frame.  @buffer
 * holds @len bytes of back-to-back htc_record_hdr + payload pairs;
 * only credit reports are handled, other record types are logged and
 * skipped.  Returns 0 on success or -EINVAL on a malformed trailer.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		/* must have room for at least a record header */
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}

			/* one record may carry several credit reports */
			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
917
918static void do_recv_completion(struct htc_endpoint *ep,
919 struct list_head *queue_to_indicate)
920{
921 struct htc_packet *packet;
922
923 if (list_empty(queue_to_indicate)) {
924 /* nothing to indicate */
925 return;
926 }
927
928 /* using legacy EpRecv */
929 while (!list_empty(queue_to_indicate)) {
930 packet = list_first_entry(queue_to_indicate,
931 struct htc_packet, list);
932 list_del(&packet->list);
933 ep->ep_cb.rx(ep->target, packet);
934 }
935
936 return;
937}
938
939static void recv_packet_completion(struct htc_target *target,
940 struct htc_endpoint *ep,
941 struct htc_packet *packet)
942{
943 struct list_head container;
944 INIT_LIST_HEAD(&container);
945 list_add_tail(&packet->list, &container);
946
947 /* do completion */
948 do_recv_completion(ep, &container);
949}
950
/*
 * HIF RX-completion entry point for one skb carrying a single HTC
 * frame: validate the header, process any trailer records, deliver
 * endpoint 0 control responses into the polling buffer, and hand data
 * frames to the endpoint RX callback.  @skb is consumed on every path
 * (dev_kfree_skb(NULL) is a no-op for the paths that hand it off).
 */
static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
				       u8 pipeid)
{
	struct htc_target *target = ar->htc_target;
	u8 *netdata, *trailer, hdr_info;
	struct htc_frame_hdr *htc_hdr;
	u32 netlen, trailerlen = 0;
	struct htc_packet *packet;
	struct htc_endpoint *ep;
	u16 payload_len;
	int status = 0;

	/*
	 * ar->htc_target can be NULL due to a race condition that can occur
	 * during driver initialization(we do 'ath6kl_hif_power_on' before
	 * initializing 'ar->htc_target' via 'ath6kl_htc_create').
	 * 'ath6kl_hif_power_on' assigns 'ath6kl_recv_complete' as
	 * usb_complete_t/callback function for 'usb_fill_bulk_urb'.
	 * Thus the possibility of ar->htc_target being NULL
	 * via ath6kl_recv_complete -> ath6kl_usb_io_comp_work.
	 */
	if (WARN_ON_ONCE(!target)) {
		ath6kl_err("Target not yet initialized\n");
		status = -EINVAL;
		goto free_skb;
	}


	netdata = skb->data;
	netlen = skb->len;

	htc_hdr = (struct htc_frame_hdr *) netdata;

	if (htc_hdr->eid >= ENDPOINT_MAX) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: invalid EndpointID=%d\n",
			   htc_hdr->eid);
		status = -EINVAL;
		goto free_skb;
	}
	ep = &target->endpoint[htc_hdr->eid];

	/* payload length is little-endian and may be unaligned */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	if (netlen < (payload_len + HTC_HDR_LENGTH)) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: insufficient length, got:%d expected =%zu\n",
			   netlen, payload_len + HTC_HDR_LENGTH);
		status = -EINVAL;
		goto free_skb;
	}

	/* get flags to check for trailer */
	hdr_info = htc_hdr->flags;
	if (hdr_info & HTC_FLG_RX_TRAILER) {
		/* extract the trailer length */
		hdr_info = htc_hdr->ctrl[0];
		if ((hdr_info < sizeof(struct htc_record_hdr)) ||
		    (hdr_info > payload_len)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid header: payloadlen should be %d, CB[0]: %d\n",
				   payload_len, hdr_info);
			status = -EINVAL;
			goto free_skb;
		}

		trailerlen = hdr_info;
		/* process trailer after hdr/apps payload */
		trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
			payload_len - hdr_info;
		status = htc_process_trailer(target, trailer, hdr_info,
					     htc_hdr->eid);
		if (status != 0)
			goto free_skb;
	}

	if (((int) payload_len - (int) trailerlen) <= 0) {
		/* zero length packet with trailer, just drop these */
		goto free_skb;
	}

	if (htc_hdr->eid == ENDPOINT_0) {
		/* handle HTC control message */
		if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
			/*
			 * fatal: target should not send unsolicited
			 * messageson the endpoint 0
			 */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "HTC ignores Rx Ctrl after setup complete\n");
			status = -EINVAL;
			goto free_skb;
		}

		/* remove HTC header */
		skb_pull(skb, HTC_HDR_LENGTH);

		netdata = skb->data;
		netlen = skb->len;

		spin_lock_bh(&target->rx_lock);

		/* stash the response for htc_wait_recv_ctrl_message() */
		target->pipe.ctrl_response_valid = true;
		target->pipe.ctrl_response_len = min_t(int, netlen,
						       HTC_MAX_CTRL_MSG_LEN);
		memcpy(target->pipe.ctrl_response_buf, netdata,
		       target->pipe.ctrl_response_len);

		spin_unlock_bh(&target->rx_lock);

		dev_kfree_skb(skb);
		skb = NULL;

		goto free_skb;
	}

	/*
	 * TODO: the message based HIF architecture allocates net bufs
	 * for recv packets since it bridges that HIF to upper layers,
	 * which expects HTC packets, we form the packets here
	 */
	packet = alloc_htc_packet_container(target);
	if (packet == NULL) {
		status = -ENOMEM;
		goto free_skb;
	}

	packet->status = 0;
	packet->endpoint = htc_hdr->eid;
	packet->pkt_cntxt = skb;

	/* TODO: for backwards compatibility */
	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
	packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;

	/*
	 * TODO: this is a hack because the driver layer will set the
	 * actual len of the skb again which will just double the len
	 */
	skb_trim(skb, 0);

	recv_packet_completion(target, ep, packet);

	/* recover the packet container */
	free_htc_packet_container(target, packet);
	skb = NULL;

free_skb:
	dev_kfree_skb(skb);

	return status;
}
1103
/*
 * Cancel every RX buffer queued on @ep, completing each with
 * -ECANCELED.  The RX lock is dropped around the completion indication
 * since the callback may re-enter HTC.
 */
static void htc_flush_rx_queue(struct htc_target *target,
			       struct htc_endpoint *ep)
{
	struct list_head container;
	struct htc_packet *packet;

	spin_lock_bh(&target->rx_lock);

	while (1) {
		if (list_empty(&ep->rx_bufq))
			break;

		packet = list_first_entry(&ep->rx_bufq,
					  struct htc_packet, list);
		list_del(&packet->list);

		/* drop the lock across the completion callback */
		spin_unlock_bh(&target->rx_lock);
		packet->status = -ECANCELED;
		packet->act_len = 0;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
			   packet, packet->buf_len,
			   packet->endpoint);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);

		/* give the packet back */
		do_recv_completion(ep, &container);
		spin_lock_bh(&target->rx_lock);
	}

	spin_unlock_bh(&target->rx_lock);
}
1139
1140/* polling routine to wait for a control packet to be received */
1141static int htc_wait_recv_ctrl_message(struct htc_target *target)
1142{
1143 int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1144
1145 while (count > 0) {
1146 spin_lock_bh(&target->rx_lock);
1147
1148 if (target->pipe.ctrl_response_valid) {
1149 target->pipe.ctrl_response_valid = false;
1150 spin_unlock_bh(&target->rx_lock);
1151 break;
1152 }
1153
1154 spin_unlock_bh(&target->rx_lock);
1155
1156 count--;
1157
1158 msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1159 }
1160
1161 if (count <= 0) {
4e1609c9 1162 ath6kl_warn("htc pipe control receive timeout!\n");
44af3442 1163 return -ETIMEDOUT;
636f8288
KV
1164 }
1165
1166 return 0;
1167}
1168
1169static void htc_rxctrl_complete(struct htc_target *context,
1170 struct htc_packet *packet)
1171{
b056397e
JW
1172 struct sk_buff *skb = packet->skb;
1173
1174 if (packet->endpoint == ENDPOINT_0 &&
1175 packet->status == -ECANCELED &&
1176 skb != NULL)
1177 dev_kfree_skb(skb);
636f8288
KV
1178}
1179
1180/* htc pipe initialization */
1181static void reset_endpoint_states(struct htc_target *target)
1182{
1183 struct htc_endpoint *ep;
1184 int i;
1185
1186 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1187 ep = &target->endpoint[i];
1188 ep->svc_id = 0;
1189 ep->len_max = 0;
1190 ep->max_txq_depth = 0;
1191 ep->eid = i;
1192 INIT_LIST_HEAD(&ep->txq);
1193 INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1194 INIT_LIST_HEAD(&ep->rx_bufq);
1195 ep->target = target;
895dc386 1196 ep->pipe.tx_credit_flow_enabled = true;
636f8288
KV
1197 }
1198}
1199
/* start HTC, this is called after all services are connected */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	/* no pipe-specific HIF configuration is needed; always succeeds */
	return 0;
}
1205
1206/* htc service functions */
1207static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1208{
1209 u8 allocation = 0;
1210 int i;
1211
1212 for (i = 0; i < ENDPOINT_MAX; i++) {
1213 if (target->pipe.txcredit_alloc[i].service_id == service_id)
1214 allocation =
1215 target->pipe.txcredit_alloc[i].credit_alloc;
1216 }
1217
1218 if (allocation == 0) {
1219 ath6kl_dbg(ATH6KL_DBG_HTC,
1220 "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1221 service_id);
1222 }
1223
1224 return allocation;
1225}
1226
/*
 * Connect an HTC service: for HTC_CTRL_RSVD_SVC the endpoint is assigned
 * locally (ENDPOINT_0); for all other services a connect-service message
 * is sent to the target and the response is polled for.  On success the
 * assigned endpoint is initialized (credits, callbacks, pipe mapping) and
 * reported back through conn_resp.  Returns 0 or a negative errno.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
				struct htc_service_connect_req *conn_req,
				struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;

	} else {
		/* a real service must have a non-zero credit allocation */
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = skb_put(skb, length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		/* recv-alloc bits are overwritten below, so mask them out */
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
			target->pipe.ctrl_response_buf;

		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* credits per message, rounded up to whole credits */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* initialize tx_drop_packet_threshold */
	ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;

	/* map this service to its upload/download HIF pipes */
	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	/* packet is non-NULL only when it was never handed to the tx path */
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
1412
1413/* htc export functions */
1414static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
1415{
1416 int status = 0;
1417 struct htc_endpoint *ep = NULL;
1418 struct htc_target *target = NULL;
1419 struct htc_packet *packet;
1420 int i;
1421
1422 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
1423 if (target == NULL) {
1424 ath6kl_err("htc create unable to allocate memory\n");
1425 status = -ENOMEM;
1426 goto fail_htc_create;
1427 }
1428
1429 spin_lock_init(&target->htc_lock);
1430 spin_lock_init(&target->rx_lock);
1431 spin_lock_init(&target->tx_lock);
1432
1433 reset_endpoint_states(target);
1434
1435 for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
1436 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
1437
1438 if (packet != NULL)
1439 free_htc_packet_container(target, packet);
1440 }
1441
1442 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
1443 if (!target->dev) {
1444 ath6kl_err("unable to allocate memory\n");
1445 status = -ENOMEM;
1446 goto fail_htc_create;
1447 }
1448 target->dev->ar = ar;
1449 target->dev->htc_cnxt = target;
1450
1451 /* Get HIF default pipe for HTC message exchange */
1452 ep = &target->endpoint[ENDPOINT_0];
1453
1454 ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
1455 &ep->pipe.pipeid_dl);
1456
1457 return target;
1458
1459fail_htc_create:
1460 if (status != 0) {
1461 if (target != NULL)
1462 ath6kl_htc_pipe_cleanup(target);
1463
1464 target = NULL;
1465 }
1466 return target;
1467}
1468
1469/* cleanup the HTC instance */
1470static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1471{
1472 struct htc_packet *packet;
1473
1474 while (true) {
1475 packet = alloc_htc_packet_container(target);
1476 if (packet == NULL)
1477 break;
1478 kfree(packet);
1479 }
1480
1481 kfree(target->dev);
1482
1483 /* kfree our instance */
1484 kfree(target);
1485}
1486
1487static int ath6kl_htc_pipe_start(struct htc_target *target)
1488{
1489 struct sk_buff *skb;
1490 struct htc_setup_comp_ext_msg *setup;
1491 struct htc_packet *packet;
1492
1493 htc_config_target_hif_pipe(target);
1494
1495 /* allocate a buffer to send */
1496 packet = htc_alloc_txctrl_packet(target);
1497 if (packet == NULL) {
1498 WARN_ON_ONCE(1);
1499 return -ENOMEM;
1500 }
1501
1502 skb = packet->skb;
1503
1504 /* assemble setup complete message */
4df864c1 1505 setup = skb_put(skb, sizeof(*setup));
636f8288
KV
1506 memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1507 setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1508
1509 ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1510
1511 set_htc_pkt_info(packet, NULL, (u8 *) setup,
1512 sizeof(struct htc_setup_comp_ext_msg),
1513 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1514
1515 target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1516
1517 return ath6kl_htc_pipe_tx(target, packet);
1518}
1519
1520static void ath6kl_htc_pipe_stop(struct htc_target *target)
1521{
1522 int i;
1523 struct htc_endpoint *ep;
1524
1525 /* cleanup endpoints */
1526 for (i = 0; i < ENDPOINT_MAX; i++) {
1527 ep = &target->endpoint[i];
1528 htc_flush_rx_queue(target, ep);
1529 htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1530 }
1531
1532 reset_endpoint_states(target);
1533 target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1534}
1535
1536static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1537 enum htc_endpoint_id endpoint)
1538{
1539 int num;
1540
1541 spin_lock_bh(&target->rx_lock);
1542 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1543 spin_unlock_bh(&target->rx_lock);
1544
1545 return num;
1546}
1547
1548static int ath6kl_htc_pipe_tx(struct htc_target *target,
1549 struct htc_packet *packet)
1550{
1551 struct list_head queue;
1552
1553 ath6kl_dbg(ATH6KL_DBG_HTC,
1554 "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1555 __func__, packet->endpoint, packet->buf,
1556 packet->act_len);
1557
1558 INIT_LIST_HEAD(&queue);
1559 list_add_tail(&packet->list, &queue);
1560
1561 return htc_send_packets_multiple(target, &queue);
1562}
1563
1564static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1565{
1566 struct htc_ready_ext_msg *ready_msg;
1567 struct htc_service_connect_req connect;
1568 struct htc_service_connect_resp resp;
1569 int status = 0;
1570
1571 status = htc_wait_recv_ctrl_message(target);
1572
1573 if (status != 0)
1574 return status;
1575
1576 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
4e1609c9
KV
1577 ath6kl_warn("invalid htc pipe ready msg len: %d\n",
1578 target->pipe.ctrl_response_len);
636f8288
KV
1579 return -ECOMM;
1580 }
1581
1582 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1583
1584 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
4e1609c9
KV
1585 ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
1586 ready_msg->ver2_0_info.msg_id);
636f8288
KV
1587 return -ECOMM;
1588 }
1589
1590 ath6kl_dbg(ATH6KL_DBG_HTC,
1591 "Target Ready! : transmit resources : %d size:%d\n",
1592 ready_msg->ver2_0_info.cred_cnt,
1593 ready_msg->ver2_0_info.cred_sz);
1594
1595 target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1596 target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1597
1598 if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1599 return -ECOMM;
1600
1601 htc_setup_target_buffer_assignments(target);
1602
1603 /* setup our pseudo HTC control endpoint connection */
1604 memset(&connect, 0, sizeof(connect));
1605 memset(&resp, 0, sizeof(resp));
1606 connect.ep_cb.tx_complete = htc_txctrl_complete;
1607 connect.ep_cb.rx = htc_rxctrl_complete;
1608 connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1609 connect.svc_id = HTC_CTRL_RSVD_SVC;
1610
1611 /* connect fake service */
1612 status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1613
1614 return status;
1615}
1616
1617static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1618 enum htc_endpoint_id endpoint, u16 tag)
1619{
1620 struct htc_endpoint *ep = &target->endpoint[endpoint];
1621
1622 if (ep->svc_id == 0) {
1623 WARN_ON_ONCE(1);
1624 /* not in use.. */
1625 return;
1626 }
1627
1628 htc_flush_tx_endpoint(target, ep, tag);
1629}
1630
1631static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1632 struct list_head *pkt_queue)
1633{
1634 struct htc_packet *packet, *tmp_pkt, *first;
1635 struct htc_endpoint *ep;
1636 int status = 0;
1637
1638 if (list_empty(pkt_queue))
1639 return -EINVAL;
1640
1641 first = list_first_entry(pkt_queue, struct htc_packet, list);
636f8288
KV
1642
1643 if (first->endpoint >= ENDPOINT_MAX) {
1644 WARN_ON_ONCE(1);
1645 return -EINVAL;
1646 }
1647
1648 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1649 __func__, first->endpoint, get_queue_depth(pkt_queue),
1650 first->buf_len);
1651
1652 ep = &target->endpoint[first->endpoint];
1653
1654 spin_lock_bh(&target->rx_lock);
1655
1656 /* store receive packets */
1657 list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1658
1659 spin_unlock_bh(&target->rx_lock);
1660
1661 if (status != 0) {
1662 /* walk through queue and mark each one canceled */
1663 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1664 packet->status = -ECANCELED;
1665 }
1666
1667 do_recv_completion(ep, pkt_queue);
1668 }
1669
1670 return status;
1671}
1672
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO: endpoint activity notifications are not implemented for
	 * the pipe HTC variant; this callback is intentionally a no-op
	 */
}
1679
/* free every queued rx buffer skb on every endpoint */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *packet, *tmp_pkt;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];

		spin_lock_bh(&target->rx_lock);

		/* NOTE(review): rx_lock is dropped inside the _safe
		 * iteration, so tmp_pkt could go stale if another context
		 * modified rx_bufq concurrently — confirm all callers run
		 * with rx traffic already stopped
		 */
		list_for_each_entry_safe(packet, tmp_pkt,
					 &endpoint->rx_bufq, list) {
			list_del(&packet->list);
			spin_unlock_bh(&target->rx_lock);
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx flush pkt 0x%p len %d ep %d\n",
				   packet, packet->buf_len,
				   packet->endpoint);
			/* pkt_cntxt holds the skb for pipe rx packets */
			dev_kfree_skb(packet->pkt_cntxt);
			spin_lock_bh(&target->rx_lock);
		}

		spin_unlock_bh(&target->rx_lock);
	}
}
1706
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	/* no host-side credit setup is performed for the pipe HTC
	 * implementation; always reports success
	 */
	return 0;
}
1712
/* dispatch table binding the generic ath6kl HTC interface to the
 * pipe-based implementation in this file
 */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
1730
/* install the pipe HTC ops on the device */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}