Merge tag 'soc-drivers-6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc
[linux-block.git] / drivers / net / wireless / mediatek / mt76 / sdio.c
CommitLineData
d39b52e3
SW
1// SPDX-License-Identifier: ISC
2/* Copyright (C) 2020 MediaTek Inc.
3 *
4 * This file is written based on mt76/usb.c.
5 *
6 * Author: Felix Fietkau <nbd@nbd.name>
7 * Lorenzo Bianconi <lorenzo@kernel.org>
8 * Sean Wang <sean.wang@mediatek.com>
9 */
10
11#include <linux/iopoll.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/mmc/sdio_func.h>
bf08d585
SW
15#include <linux/mmc/card.h>
16#include <linux/mmc/host.h>
d39b52e3
SW
17#include <linux/sched.h>
18#include <linux/kthread.h>
19
20#include "mt76.h"
764dee47
LB
21#include "sdio.h"
22
23static u32 mt76s_read_whisr(struct mt76_dev *dev)
24{
25 return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
26}
27
28u32 mt76s_read_pcr(struct mt76_dev *dev)
29{
30 struct mt76_sdio *sdio = &dev->sdio;
31
32 return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
33}
34EXPORT_SYMBOL_GPL(mt76s_read_pcr);
35
/* Read a device register at @offset through the firmware mailbox:
 * post the address in H2DSM0R, raise the "read" software interrupt,
 * poll WHISR until the device acks, then fetch the value from D2HRM1R.
 * Returns ~0 on any failure (callers have no separate error channel).
 */
static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
{
	struct sdio_func *func = dev->sdio.func;
	u32 val = ~0, status;
	int err;

	sdio_claim_host(func);

	/* Tell the device which register we want to read. */
	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	/* Kick the firmware: request a mailbox read. */
	sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* Wait (up to 1s) for the device to flag completion in WHISR. */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_READ, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* Acknowledge/clear the interrupt status bit. */
	sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
		goto out;
	}

	/* Read back the address to verify the device serviced our request. */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset) {
		dev_err(dev->dev, "register mismatch\n");
		val = ~0;
		goto out;
	}

	/* The actual register value is delivered in D2HRM1R. */
	val = sdio_readl(func, MCR_D2HRM1R, &err);
	if (err < 0)
		dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);

out:
	sdio_release_host(func);

	return val;
}
90
/* Write @val to the device register at @offset through the firmware
 * mailbox. Mirror of mt76s_read_mailbox(): post address/value in
 * H2DSM0R/H2DSM1R, raise the "write" software interrupt, then poll
 * WHISR until the device acks. Failures are only logged — the mailbox
 * write API has no error return.
 */
static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
{
	struct sdio_func *func = dev->sdio.func;
	u32 status;
	int err;

	sdio_claim_host(func);

	/* Target register address. */
	sdio_writel(func, offset, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting address [err=%d]\n", err);
		goto out;
	}

	/* Value to be written. */
	sdio_writel(func, val, MCR_H2DSM1R, &err);
	if (err < 0) {
		dev_err(dev->dev,
			"failed setting write value [err=%d]\n", err);
		goto out;
	}

	/* Kick the firmware: request a mailbox write. */
	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* Wait (up to 1s) for the device to flag completion in WHISR. */
	err = readx_poll_timeout(mt76s_read_whisr, dev, status,
				 status & H2D_SW_INT_WRITE, 0, 1000000);
	if (err < 0) {
		dev_err(dev->dev, "query whisr timeout\n");
		goto out;
	}

	/* Acknowledge/clear the interrupt status bit. */
	sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
		goto out;
	}

	/* Read back the address to verify the device serviced our request. */
	val = sdio_readl(func, MCR_H2DSM0R, &err);
	if (err < 0) {
		dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
		goto out;
	}

	if (val != offset)
		dev_err(dev->dev, "register mismatch\n");

out:
	sdio_release_host(func);
}
143
144u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
145{
146 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
147 return dev->mcu_ops->mcu_rr(dev, offset);
148 else
149 return mt76s_read_mailbox(dev, offset);
150}
151EXPORT_SYMBOL_GPL(mt76s_rr);
152
153void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
154{
155 if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
156 dev->mcu_ops->mcu_wr(dev, offset, val);
157 else
158 mt76s_write_mailbox(dev, offset, val);
159}
160EXPORT_SYMBOL_GPL(mt76s_wr);
161
162u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
163{
164 val |= mt76s_rr(dev, offset) & ~mask;
165 mt76s_wr(dev, offset, val);
166
167 return val;
168}
169EXPORT_SYMBOL_GPL(mt76s_rmw);
170
171void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
172 const void *data, int len)
173{
174 const u32 *val = data;
175 int i;
176
177 for (i = 0; i < len / sizeof(u32); i++) {
178 mt76s_wr(dev, offset, val[i]);
179 offset += sizeof(u32);
180 }
181}
182EXPORT_SYMBOL_GPL(mt76s_write_copy);
183
184void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
185 void *data, int len)
186{
187 u32 *val = data;
188 int i;
189
190 for (i = 0; i < len / sizeof(u32); i++) {
191 val[i] = mt76s_rr(dev, offset);
192 offset += sizeof(u32);
193 }
194}
195EXPORT_SYMBOL_GPL(mt76s_read_copy);
196
197int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
198 const struct mt76_reg_pair *data,
199 int len)
200{
201 int i;
202
203 for (i = 0; i < len; i++) {
204 mt76s_wr(dev, data->reg, data->value);
205 data++;
206 }
207
208 return 0;
209}
210EXPORT_SYMBOL_GPL(mt76s_wr_rp);
211
212int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
213 struct mt76_reg_pair *data, int len)
214{
215 int i;
216
217 for (i = 0; i < len; i++) {
218 data->value = mt76s_rr(dev, data->reg);
219 data++;
220 }
221
222 return 0;
223}
224EXPORT_SYMBOL_GPL(mt76s_rd_rp);
225
/* Bring up the SDIO function: enable it, claim device ownership, set the
 * block size, unmask/configure interrupts and install the IRQ handler.
 * @hw_ver selects CONNAC vs CONNAC2 register layouts. On any failure the
 * function is disabled again and the error code is returned.
 */
int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
{
	u32 status, ctrl;
	int ret;

	dev->sdio.hw_ver = hw_ver;

	sdio_claim_host(func);

	ret = sdio_enable_func(func);
	if (ret < 0)
		goto release;

	/* Get ownership from the device */
	sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
		    MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	/* Poll (up to 1s) until the device grants driver ownership. */
	ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
				 status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
	if (ret < 0) {
		dev_err(dev->dev, "Cannot get ownership from device");
		goto disable_func;
	}

	ret = sdio_set_block_size(func, 512);
	if (ret < 0)
		goto disable_func;

	/* Enable interrupt */
	sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
	if (ret < 0)
		goto disable_func;

	/* Unmask rx0/tx-done; CONNAC2 has a second rx ring to unmask too. */
	ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
	if (hw_ver == MT76_CONNAC2_SDIO)
		ctrl |= WHIER_RX1_DONE_INT_EN;
	sdio_writel(func, ctrl, MCR_WHIER, &ret);
	if (ret < 0)
		goto disable_func;

	switch (hw_ver) {
	case MT76_CONNAC_SDIO:
		/* set WHISR as read clear and Rx aggregation number as 16 */
		ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
		break;
	default:
		/* CONNAC2: preserve the other WHCR bits, switch WHISR to
		 * read-clear mode and zero the rx aggregation length field.
		 */
		ctrl = sdio_readl(func, MCR_WHCR, &ret);
		if (ret < 0)
			goto disable_func;
		ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
		ctrl &= ~W_INT_CLR_CTRL; /* read clear */
		ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
		break;
	}

	sdio_writel(func, ctrl, MCR_WHCR, &ret);
	if (ret < 0)
		goto disable_func;

	ret = sdio_claim_irq(func, mt76s_sdio_irq);
	if (ret < 0)
		goto disable_func;

	sdio_release_host(func);

	return 0;

disable_func:
	sdio_disable_func(func);
release:
	sdio_release_host(func);

	return ret;
}
EXPORT_SYMBOL_GPL(mt76s_hw_init);
d39b52e3 303
d512b008 304int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
d39b52e3
SW
305{
306 struct mt76_queue *q = &dev->q_rx[qid];
307
308 spin_lock_init(&q->lock);
309 q->entry = devm_kcalloc(dev->dev,
b1460bb4 310 MT76S_NUM_RX_ENTRIES, sizeof(*q->entry),
d39b52e3
SW
311 GFP_KERNEL);
312 if (!q->entry)
313 return -ENOMEM;
314
b1460bb4 315 q->ndesc = MT76S_NUM_RX_ENTRIES;
d39b52e3
SW
316 q->head = q->tail = 0;
317 q->queued = 0;
318
319 return 0;
320}
d512b008 321EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);
d39b52e3 322
a2a93548
LB
323static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
324{
325 struct mt76_queue *q;
326
327 q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
328 if (!q)
329 return ERR_PTR(-ENOMEM);
330
331 spin_lock_init(&q->lock);
332 q->entry = devm_kcalloc(dev->dev,
b1460bb4 333 MT76S_NUM_TX_ENTRIES, sizeof(*q->entry),
a2a93548
LB
334 GFP_KERNEL);
335 if (!q->entry)
336 return ERR_PTR(-ENOMEM);
337
b1460bb4 338 q->ndesc = MT76S_NUM_TX_ENTRIES;
a2a93548
LB
339
340 return q;
341}
342
d512b008 343int mt76s_alloc_tx(struct mt76_dev *dev)
d39b52e3
SW
344{
345 struct mt76_queue *q;
346 int i;
347
a2a93548
LB
348 for (i = 0; i <= MT_TXQ_PSD; i++) {
349 q = mt76s_alloc_tx_queue(dev);
350 if (IS_ERR(q))
351 return PTR_ERR(q);
d39b52e3 352
91990519 353 dev->phy.q_tx[i] = q;
a2a93548 354 }
d39b52e3 355
a2a93548
LB
356 q = mt76s_alloc_tx_queue(dev);
357 if (IS_ERR(q))
358 return PTR_ERR(q);
d39b52e3 359
e637763b 360 dev->q_mcu[MT_MCUQ_WM] = q;
d39b52e3
SW
361
362 return 0;
363}
d512b008 364EXPORT_SYMBOL_GPL(mt76s_alloc_tx);
d39b52e3
SW
365
366static struct mt76_queue_entry *
367mt76s_get_next_rx_entry(struct mt76_queue *q)
368{
369 struct mt76_queue_entry *e = NULL;
370
371 spin_lock_bh(&q->lock);
372 if (q->queued > 0) {
16254fc5
FF
373 e = &q->entry[q->tail];
374 q->tail = (q->tail + 1) % q->ndesc;
d39b52e3
SW
375 q->queued--;
376 }
377 spin_unlock_bh(&q->lock);
378
379 return e;
380}
381
382static int
383mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
384{
385 int qid = q - &dev->q_rx[MT_RXQ_MAIN];
386 int nframes = 0;
387
388 while (true) {
389 struct mt76_queue_entry *e;
390
391 if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
392 break;
393
394 e = mt76s_get_next_rx_entry(q);
395 if (!e || !e->skb)
396 break;
397
c3137942 398 dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb, NULL);
d39b52e3
SW
399 e->skb = NULL;
400 nframes++;
401 }
402 if (qid == MT_RXQ_MAIN)
403 mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);
404
405 return nframes;
406}
407
6a618acb
LB
408static void mt76s_net_worker(struct mt76_worker *w)
409{
410 struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
411 net_worker);
412 struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
413 int i, nframes;
414
415 do {
416 nframes = 0;
417
418 local_bh_disable();
419 rcu_read_lock();
420
421 mt76_for_each_q_rx(dev, i)
422 nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
423
424 rcu_read_unlock();
425 local_bh_enable();
426 } while (nframes > 0);
427}
428
/* Reap completed tx entries from @q (stopping at the first entry whose
 * done flag the txrx worker has not set yet) and run the completion
 * path for each. Returns the number of frames completed.
 */
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;
	int nframes = 0;
	bool mcu;

	/* MCU queue may not exist yet / on all configs. */
	if (!q)
		return 0;

	mcu = q == dev->q_mcu[MT_MCUQ_WM];
	while (q->queued > 0) {
		/* Entries complete in order; an un-done tail means the
		 * rest of the ring is still in flight.
		 */
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		/* MCU frames have no mac80211 status path: free the skb
		 * here and clear it so the generic completion skips it.
		 */
		if (mcu) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		/* Advances q->tail / q->queued under the queue lock. */
		mt76_queue_tx_complete(dev, q, &entry);
		nframes++;
	}

	/* Queue fully drained: wake anyone waiting in tx flush paths. */
	if (!q->queued)
		wake_up(&dev->tx_wait);

	return nframes;
}
460
/* Tx status worker: loop reaping the MCU queue and all data queues
 * until a full pass completes nothing. Kicks the stat worker when data
 * frames completed (and stats are not already being read / suspended),
 * and reschedules the tx worker afterwards so freed ring slots get
 * refilled.
 */
static void mt76s_status_worker(struct mt76_worker *w)
{
	struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
					      status_worker);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	bool resched = false;
	int i, nframes;

	do {
		int ndata_frames = 0;

		nframes = mt76s_process_tx_queue(dev, dev->q_mcu[MT_MCUQ_WM]);

		for (i = 0; i <= MT_TXQ_PSD; i++)
			ndata_frames += mt76s_process_tx_queue(dev,
							       dev->phy.q_tx[i]);
		nframes += ndata_frames;
		/* Only data-frame completions free slots mac80211 can use. */
		if (ndata_frames > 0)
			resched = true;

		/* test_and_set_bit guards against scheduling the stat
		 * worker while a previous stats read is still running.
		 */
		if (dev->drv->tx_status_data && ndata_frames > 0 &&
		    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state) &&
		    !test_bit(MT76_STATE_SUSPEND, &dev->phy.state))
			mt76_worker_schedule(&sdio->stat_worker);
	} while (nframes > 0);

	if (resched)
		mt76_worker_schedule(&dev->tx_worker);
}
490
/* Stat worker body: repeatedly pull tx status records from the driver
 * until it reports none left or the device is removed. If anything was
 * read and the device is still running, bounce back to the status
 * worker; otherwise drop the MT76_READING_STATS guard bit set by it.
 */
static void mt76s_tx_status_data(struct mt76_worker *worker)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(worker, struct mt76_sdio, stat_worker);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		/* Returns false once the device has no more status data. */
		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		mt76_worker_schedule(&sdio->status_worker);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}
515
/* Queue a data frame on @q: let the driver prepare the skb (add txwi
 * etc.), then publish it in the ring head slot. Returns the ring index
 * used, -ENOSPC when the ring is full, or the driver's error code.
 * NOTE(review): unlike mt76s_tx_queue_skb_raw() this does not take
 * q->lock — presumably the caller serializes tx enqueue; confirm.
 */
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		   enum mt76_txq_id qid, struct sk_buff *skb,
		   struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	/* buf_sz records the pre-txwi length; 0xffff marks "no wcid". */
	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].wcid = 0xffff;

	/* Make the entry fields visible before the head index advances,
	 * so the txrx worker never sees a slot with stale contents.
	 */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}
546
/* Queue a raw (MCU) frame on @q: pad to a 4-byte boundary and publish
 * it in the ring under q->lock. Consumes @skb on both success and
 * failure. Returns 0 or a negative errno.
 */
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
		       struct sk_buff *skb, u32 tx_info)
{
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	/* SDIO transfers want 4-byte alignment; pad the tail if needed. */
	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	/* buf_sz keeps the unpadded length for the completion path. */
	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;

	/* ensure the entry fully updated before bus access */
	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}
581
582static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
583{
584 struct mt76_sdio *sdio = &dev->sdio;
585
fefb584d 586 mt76_worker_schedule(&sdio->txrx_worker);
d39b52e3
SW
587}
588
/* Queue ops installed by mt76s_init(); only the enqueue/kick hooks are
 * needed on SDIO, the remaining mt76_queue_ops callbacks stay NULL.
 */
static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
594
d39b52e3
SW
/* Tear down the SDIO transport: stop all workers first (so nothing
 * touches the rings afterwards), flush pending tx status, release the
 * IRQ and free any skbs still sitting in the rx rings. The entry arrays
 * themselves are devm-managed and freed with the device.
 */
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76_worker_teardown(&sdio->txrx_worker);
	mt76_worker_teardown(&sdio->status_worker);
	mt76_worker_teardown(&sdio->net_worker);
	mt76_worker_teardown(&sdio->stat_worker);

	/* Workers are gone; nobody can be mid-stats-read anymore. */
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, true);

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	/* Free any rx skbs that were queued but never processed. */
	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);
629
/* Initialize the SDIO transport glue: spawn the status/net/stat workers,
 * install the SDIO queue/bus ops and size the bounce buffer used for tx
 * bursts from the host controller's limits. Returns 0 or a negative
 * errno.
 * NOTE(review): if a later worker_setup fails, earlier workers are not
 * torn down here — presumably the caller's error path ends in
 * mt76s_deinit(); confirm.
 */
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;
	u32 host_max_cap;
	int err;

	err = mt76_worker_setup(dev->hw, &sdio->status_worker,
				mt76s_status_worker, "sdio-status");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->net_worker, mt76s_net_worker,
				"sdio-net");
	if (err)
		return err;

	err = mt76_worker_setup(dev->hw, &sdio->stat_worker, mt76s_tx_status_data,
				"sdio-sta");
	if (err)
		return err;

	sched_set_fifo_low(sdio->status_worker.task);
	sched_set_fifo_low(sdio->net_worker.task);
	sched_set_fifo_low(sdio->stat_worker.task);

	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	/* Largest single transfer the host controller can do, capped by
	 * the driver's own MT76S_XMIT_BUF_SZ limit.
	 */
	host_max_cap = min_t(u32, func->card->host->max_req_size,
			     func->cur_blksize *
			     func->card->host->max_blk_count);
	dev->sdio.xmit_buf_sz = min_t(u32, host_max_cap, MT76S_XMIT_BUF_SZ);
	dev->sdio.xmit_buf = devm_kmalloc(dev->dev, dev->sdio.xmit_buf_sz,
					  GFP_KERNEL);
	if (!dev->sdio.xmit_buf)
		err = -ENOMEM;

	return err;
}
EXPORT_SYMBOL_GPL(mt76s_init);
672
673MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
674MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
f3f8f050 675MODULE_DESCRIPTION("MediaTek MT76x SDIO helpers");
d39b52e3 676MODULE_LICENSE("Dual BSD/GPL");