net/xdp/xdp_umem.c
// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

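/* Add a socket to the umem's list of bound sockets. The list is
 * walked under RCU on the data path, so insertions and removals use
 * the RCU list primitives, serialized by xsk_list_lock.
 */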
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we
 * do not know if the device has more tx queues than rx queues or vice
 * versa. This may also change at run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

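/* Return the umem bound to the given queue id, or NULL if none is
 * registered there.
 */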
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}

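/* Clear the umem pointer for the given queue id in both the rx and
 * the tx queue structs, mirroring xdp_reg_umem_at_qid().
 */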
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

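/* Bind a umem to a device and queue id, attempting zero-copy mode
 * when possible. XDP_ZEROCOPY and XDP_COPY are mutually exclusive.
 * With neither flag set, zero-copy is tried first, with a silent
 * fallback to copy mode if the driver does not support it.
 */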
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	rtnl_lock();
	if (xdp_get_umem_from_qid(dev, queue_id)) {
		err = -EBUSY;
		goto out_rtnl_unlock;
	}

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		goto out_rtnl_unlock;

	umem->dev = dev;
	umem->queue_id = queue_id;
	if (force_copy)
		/* For copy-mode, we are done. */
		goto out_rtnl_unlock;

	if (!dev->netdev_ops->ndo_bpf ||
	    !dev->netdev_ops->ndo_xsk_async_xmit) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;
	rtnl_unlock();

	dev_hold(dev);
	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
out_rtnl_unlock:
	rtnl_unlock();
	return err;
}

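/* Undo xdp_umem_assign_dev(): ask a zero-copy capable driver to
 * release the umem, clear the queue's umem pointers, and drop the
 * device reference taken for zero-copy mode.
 */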
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	if (umem->dev) {
		rtnl_lock();
		xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
		rtnl_unlock();
	}

	if (umem->zc) {
		dev_put(umem->dev);
		umem->zc = false;
	}
}

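/* Unpin the pages pinned by xdp_umem_pin_pages(). The pages are
 * marked dirty since the kernel may have written packet data to them.
 */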
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

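/* Give the pinned pages back to the user's RLIMIT_MEMLOCK accounting. */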
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

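/* Tear down the umem once the last reference has been dropped:
 * detach it from the device, destroy its fill and completion rings,
 * unpin and unaccount the user pages, and free the umem itself.
 */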
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	mmput(mm);
	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

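/* xdp_umem_release() takes rtnl_lock and may sleep, so the final put
 * defers the actual release to a workqueue.
 */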
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

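/* Drop a reference to the umem; the last put schedules the release
 * work defined above.
 */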
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

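/* Pin the user pages backing the umem so that they stay resident
 * while the kernel and the device access them.
 */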
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	npgs = get_user_pages_longterm(umem->address, umem->npgs,
				       gup_flags, &umem->pgs[0], NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

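/* Charge the umem's pages against the user's RLIMIT_MEMLOCK, unless
 * the caller has CAP_IPC_LOCK. The cmpxchg loop makes the limit check
 * and the update one atomic step with respect to other tasks of the
 * same user.
 */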
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

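/* Validate a registration request and set the umem up: check chunk
 * size, alignment and headroom, account and pin the user pages, and
 * build the array of kernel virtual addresses for the pages.
 */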
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* The memory area has to be page size aligned. This is
		 * required for simplicity and might be relaxed in the
		 * future.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->chunk_mask = ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_account;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

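/* Allocate a umem and register it according to the request passed in
 * from user space, typically via the XDP_UMEM_REG setsockopt.
 */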
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

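/* A umem is ready to carry traffic only once both its fill ring and
 * its completion ring have been created.
 */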
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}