// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAIF Interface registration.
 * Copyright (C) ST-Ericsson AB 2010
 * Author: Sjur Brendeland
 *
 * Borrowed heavily from file: pn_dev.c. Thanks to Remi Denis-Courmont
 * and Sakari Ailus <sakari.ailus@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__

#include <linux/kernel.h>
#include <linux/if_arp.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <net/netns/generic.h>
#include <net/net_namespace.h>
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>

MODULE_LICENSE("GPL");

/* Used for local tracking of the CAIF net devices */
struct caif_device_entry {
	struct cflayer layer;
	struct list_head list;
	struct net_device *netdev;
	int __percpu *pcpu_refcnt;
	spinlock_t flow_lock;
	struct sk_buff *xoff_skb;
	void (*xoff_skb_dtor)(struct sk_buff *skb);
	bool xoff;
};

struct caif_device_entry_list {
	struct list_head list;
	/* Protects simultaneous deletes in list */
	struct mutex lock;
};

struct caif_net {
	struct cfcnfg *cfg;
	struct caif_device_entry_list caifdevs;
};

static unsigned int caif_net_id;
static int q_high = 50; /* Percent */
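
/*
 * Return the per-namespace CAIF configuration object, set up in
 * caif_init_net() below.
 */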
struct cfcnfg *get_cfcnfg(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return caifn->cfg;
}
EXPORT_SYMBOL(get_cfcnfg);

static struct caif_device_entry_list *caif_device_list(struct net *net)
{
	struct caif_net *caifn;
	caifn = net_generic(net, caif_net_id);
	return &caifn->caifdevs;
}
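
/*
 * Lightweight per-CPU reference counting for a device entry: hold/put
 * only touch the local CPU's counter, and caifd_refcnt_read() sums all
 * possible CPUs on the (slow) teardown paths to check whether packets
 * are still in flight through the entry.
 */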
static void caifd_put(struct caif_device_entry *e)
{
	this_cpu_dec(*e->pcpu_refcnt);
}

static void caifd_hold(struct caif_device_entry *e)
{
	this_cpu_inc(*e->pcpu_refcnt);
}

static int caifd_refcnt_read(struct caif_device_entry *e)
{
	int i, refcnt = 0;
	for_each_possible_cpu(i)
		refcnt += *per_cpu_ptr(e->pcpu_refcnt, i);
	return refcnt;
}

/* Allocate new CAIF device. */
static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
{
	struct caif_device_entry *caifd;

	caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
	if (!caifd)
		return NULL;
	caifd->pcpu_refcnt = alloc_percpu(int);
	if (!caifd->pcpu_refcnt) {
		kfree(caifd);
		return NULL;
	}
	caifd->netdev = dev;
	dev_hold(dev);
	return caifd;
}
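
/*
 * Look up the device entry for a net_device. Callers must hold
 * rcu_read_lock() or the RTNL lock, matching the lockdep annotation
 * on the RCU list walk.
 */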
static struct caif_device_entry *caif_get(struct net_device *dev)
{
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(dev_net(dev));
	struct caif_device_entry *caifd;

	list_for_each_entry_rcu(caifd, &caifdevs->list, list,
				lockdep_rtnl_is_held()) {
		if (caifd->netdev == dev)
			return caifd;
	}
	return NULL;
}
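
/*
 * Flow-on callback, installed by transmit() as the destructor of the
 * skb that triggered flow-off. Once the driver releases that skb, the
 * xoff state is cleared, the original destructor is invoked, and
 * FLOW_ON is signalled to the layer above.
 */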
static void caif_flow_cb(struct sk_buff *skb)
{
	struct caif_device_entry *caifd;
	void (*dtor)(struct sk_buff *skb) = NULL;
	bool send_xoff;

	WARN_ON(skb->dev == NULL);

	rcu_read_lock();
	caifd = caif_get(skb->dev);

	WARN_ON(caifd == NULL);
	if (!caifd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	spin_lock_bh(&caifd->flow_lock);
	send_xoff = caifd->xoff;
	caifd->xoff = false;
	dtor = caifd->xoff_skb_dtor;

	if (WARN_ON(caifd->xoff_skb != skb))
		skb = NULL;

	caifd->xoff_skb = NULL;
	caifd->xoff_skb_dtor = NULL;

	spin_unlock_bh(&caifd->flow_lock);

	if (dtor && skb)
		dtor(skb);

	if (send_xoff)
		caifd->layer.up->
			ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
				caifd->layer.id);
	caifd_put(caifd);
}
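
/*
 * Transmit a CAIF packet down to the underlying net_device. If the
 * device uses a real TX queue and that queue is stopped or filled
 * beyond q_high percent of tx_queue_len, flow-off is signalled upwards
 * until the queue drains (see caif_flow_cb() above).
 */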
static int transmit(struct cflayer *layer, struct cfpkt *pkt)
{
	int err, high = 0, qlen = 0;
	struct caif_device_entry *caifd =
	    container_of(layer, struct caif_device_entry, layer);
	struct sk_buff *skb;
	struct netdev_queue *txq;

	rcu_read_lock_bh();

	skb = cfpkt_tonative(pkt);
	skb->dev = caifd->netdev;
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_CAIF);

	/* Check if we need to handle xoff */
	if (likely(caifd->netdev->priv_flags & IFF_NO_QUEUE))
		goto noxoff;

	if (unlikely(caifd->xoff))
		goto noxoff;

	if (likely(!netif_queue_stopped(caifd->netdev))) {
		struct Qdisc *sch;

		/* If we run with a TX queue, check if the queue is too long */
		txq = netdev_get_tx_queue(skb->dev, 0);
		sch = rcu_dereference_bh(txq->qdisc);
		if (likely(qdisc_is_empty(sch)))
			goto noxoff;

		/* We can check for an explicit qdisc length only for
		 * !NOLOCK qdiscs; always set flow off otherwise.
		 */
		high = (caifd->netdev->tx_queue_len * q_high) / 100;
		if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high))
			goto noxoff;
	}

	/* Hold lock while accessing xoff */
	spin_lock_bh(&caifd->flow_lock);
	if (caifd->xoff) {
		spin_unlock_bh(&caifd->flow_lock);
		goto noxoff;
	}

	/*
	 * Handle flow off: we do this by temporarily hijacking this
	 * skb's destructor function and replacing it with our own
	 * flow-on callback. The callback will set flow-on and call
	 * the original destructor.
	 */

	pr_debug("queue has stopped (%d) or is full (%d > %d)\n",
			netif_queue_stopped(caifd->netdev),
			qlen, high);
	caifd->xoff = true;
	caifd->xoff_skb = skb;
	caifd->xoff_skb_dtor = skb->destructor;
	skb->destructor = caif_flow_cb;
	spin_unlock_bh(&caifd->flow_lock);

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				caifd->layer.id);
noxoff:
	rcu_read_unlock_bh();

	err = dev_queue_xmit(skb);
	if (err > 0)
		err = -EIO;

	return err;
}

/*
 * Stuff received packets into the CAIF stack.
 * On error, returns non-zero and releases the skb.
 */
static int receive(struct sk_buff *skb, struct net_device *dev,
		   struct packet_type *pkttype, struct net_device *orig_dev)
{
	struct cfpkt *pkt;
	struct caif_device_entry *caifd;
	int err;

	pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);

	rcu_read_lock();
	caifd = caif_get(dev);

	if (!caifd || !caifd->layer.up || !caifd->layer.up->receive ||
			!netif_oper_up(caifd->netdev)) {
		rcu_read_unlock();
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Hold reference to netdevice while using CAIF stack */
	caifd_hold(caifd);
	rcu_read_unlock();

	err = caifd->layer.up->receive(caifd->layer.up, pkt);

	/* For -EILSEQ the packet is not freed, so free it now */
	if (err == -EILSEQ)
		cfpkt_destroy(pkt);

	/* Release reference to stack upwards */
	caifd_put(caifd);

	if (err != 0)
		err = NET_RX_DROP;
	return err;
}

static struct packet_type caif_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_CAIF),
	.func = receive,
};
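
/* Propagate device flow control (on/off) up the CAIF stack. */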
static void dev_flowctrl(struct net_device *dev, int on)
{
	struct caif_device_entry *caifd;

	rcu_read_lock();

	caifd = caif_get(dev);
	if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
		rcu_read_unlock();
		return;
	}

	caifd_hold(caifd);
	rcu_read_unlock();

	caifd->layer.up->ctrlcmd(caifd->layer.up,
				 on ?
				 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
				 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
				 caifd->layer.id);
	caifd_put(caifd);
}
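
/*
 * Enroll a net_device as a CAIF physical interface: allocate a device
 * entry, add it to the per-namespace list, and register it as a phy
 * layer with the CAIF configuration object. Exported for use by CAIF
 * link-layer drivers.
 *
 * Minimal caller sketch (hypothetical driver; no link-layer framing,
 * so link_support is NULL and no extra head room is reserved):
 *
 *	struct cflayer *layer;
 *	int err = caif_enroll_dev(dev, caifdev, NULL, 0, &layer, NULL);
 *	if (err)
 *		return err;
 */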
int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		    struct cflayer *link_support, int head_room,
		    struct cflayer **layer,
		    int (**rcv_func)(struct sk_buff *, struct net_device *,
				     struct packet_type *,
				     struct net_device *))
{
	struct caif_device_entry *caifd;
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);

	switch (caifdev->link_select) {
	case CAIF_LINK_HIGH_BANDW:
		pref = CFPHYPREF_HIGH_BW;
		break;
	case CAIF_LINK_LOW_LATENCY:
		pref = CFPHYPREF_LOW_LAT;
		break;
	default:
		pref = CFPHYPREF_HIGH_BW;
		break;
	}
	mutex_lock(&caifdevs->lock);
	list_add_rcu(&caifd->list, &caifdevs->list);

	strscpy(caifd->layer.name, dev->name,
		sizeof(caifd->layer.name));
	caifd->layer.transmit = transmit;
	res = cfcnfg_add_phy_layer(cfg,
				   dev,
				   &caifd->layer,
				   pref,
				   link_support,
				   caifdev->use_fcs,
				   head_room);
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
	return res;
}
EXPORT_SYMBOL(caif_enroll_dev);

/* Notify CAIF of device events. */
static int caif_device_notify(struct notifier_block *me, unsigned long what,
			      void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct caif_device_entry *caifd = NULL;
	struct caif_dev_common *caifdev;
	struct cfcnfg *cfg;
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));

	caifd = caif_get(dev);
	if (caifd == NULL && dev->type != ARPHRD_CAIF)
		return 0;

	switch (what) {
	case NETDEV_REGISTER:
		if (caifd != NULL)
			break;

		caifdev = netdev_priv(dev);

		link_support = NULL;
		if (caifdev->use_frag) {
			head_room = 1;
			link_support = cfserl_create(dev->ifindex,
						     caifdev->use_stx);
			if (!link_support) {
				pr_warn("Out of memory\n");
				break;
			}
		}
		res = caif_enroll_dev(dev, caifdev, link_support, head_room,
				      &layer, NULL);
		if (res)
			cfserl_release(link_support);
		caifdev->flowctrl = dev_flowctrl;
		break;

	case NETDEV_UP:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (caifd == NULL) {
			rcu_read_unlock();
			break;
		}

		caifd->xoff = false;
		cfcnfg_set_phy_state(cfg, &caifd->layer, true);
		rcu_read_unlock();

		break;

	case NETDEV_DOWN:
		rcu_read_lock();

		caifd = caif_get(dev);
		if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd) {
			rcu_read_unlock();
			return -EINVAL;
		}

		cfcnfg_set_phy_state(cfg, &caifd->layer, false);
		caifd_hold(caifd);
		rcu_read_unlock();

		caifd->layer.up->ctrlcmd(caifd->layer.up,
					 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
					 caifd->layer.id);

		spin_lock_bh(&caifd->flow_lock);

		/*
		 * Replace our xoff-destructor with the original destructor.
		 * We trust that skb->destructor *always* is called before
		 * the skb reference is invalid. The hijacked SKB destructor
		 * takes the flow_lock, so manipulating skb->destructor here
		 * should be safe.
		 */
		if (caifd->xoff_skb_dtor != NULL && caifd->xoff_skb != NULL)
			caifd->xoff_skb->destructor = caifd->xoff_skb_dtor;

		caifd->xoff = false;
		caifd->xoff_skb_dtor = NULL;
		caifd->xoff_skb = NULL;

		spin_unlock_bh(&caifd->flow_lock);
		caifd_put(caifd);
		break;

	case NETDEV_UNREGISTER:
		mutex_lock(&caifdevs->lock);

		caifd = caif_get(dev);
		if (caifd == NULL) {
			mutex_unlock(&caifdevs->lock);
			break;
		}
		list_del_rcu(&caifd->list);

		/*
		 * NETDEV_UNREGISTER is called repeatedly until all
		 * reference counts for the net-device are released. If
		 * references to caifd are still taken, simply ignore
		 * NETDEV_UNREGISTER and wait for the next call to
		 * NETDEV_UNREGISTER.
		 *
		 * If any packets are in flight down the CAIF stack,
		 * cfcnfg_del_phy_layer will return nonzero.
		 * If no packets are in flight, the CAIF stack associated
		 * with the net-device un-registering is freed.
		 */
		if (caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0) {

			pr_info("Wait for device in use\n");
			/* Re-enroll the device if the CAIF stack is still in use */
			list_add_rcu(&caifd->list, &caifdevs->list);
			mutex_unlock(&caifdevs->lock);
			break;
		}

		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);

		mutex_unlock(&caifdevs->lock);
		break;
	}
	return 0;
}

static struct notifier_block caif_device_notifier = {
	.notifier_call = caif_device_notify,
	.priority = 0,
};

/* Per-namespace CAIF devices handling */
static int caif_init_net(struct net *net)
{
	struct caif_net *caifn = net_generic(net, caif_net_id);
	INIT_LIST_HEAD(&caifn->caifdevs.list);
	mutex_init(&caifn->caifdevs.lock);

	caifn->cfg = cfcnfg_create();
	if (!caifn->cfg)
		return -ENOMEM;

	return 0;
}
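
/*
 * Namespace teardown: unhook every enrolled device, wait (at most ten
 * 250 ms retries per device) for in-flight packets to drain, then free
 * the device entries and the configuration object.
 */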
static void caif_exit_net(struct net *net)
{
	struct caif_device_entry *caifd, *tmp;
	struct caif_device_entry_list *caifdevs =
	    caif_device_list(net);
	struct cfcnfg *cfg = get_cfcnfg(net);

	rtnl_lock();
	mutex_lock(&caifdevs->lock);

	list_for_each_entry_safe(caifd, tmp, &caifdevs->list, list) {
		int i = 0;
		list_del_rcu(&caifd->list);
		cfcnfg_set_phy_state(cfg, &caifd->layer, false);

		while (i < 10 &&
			(caifd_refcnt_read(caifd) != 0 ||
			cfcnfg_del_phy_layer(cfg, &caifd->layer) != 0)) {

			pr_info("Wait for device in use\n");
			msleep(250);
			i++;
		}
		synchronize_rcu();
		dev_put(caifd->netdev);
		free_percpu(caifd->pcpu_refcnt);
		kfree(caifd);
	}
	cfcnfg_remove(cfg);

	mutex_unlock(&caifdevs->lock);
	rtnl_unlock();
}

static struct pernet_operations caif_net_ops = {
	.init = caif_init_net,
	.exit = caif_exit_net,
	.id   = &caif_net_id,
	.size = sizeof(struct caif_net),
};

/* Initialize the CAIF device list */
static int __init caif_device_init(void)
{
	int result;

	result = register_pernet_subsys(&caif_net_ops);

	if (result)
		return result;

	register_netdevice_notifier(&caif_device_notifier);
	dev_add_pack(&caif_packet_type);

	return result;
}

static void __exit caif_device_exit(void)
{
	unregister_netdevice_notifier(&caif_device_notifier);
	dev_remove_pack(&caif_packet_type);
	unregister_pernet_subsys(&caif_net_ops);
}

module_init(caif_device_init);
module_exit(caif_device_exit);