/*
 * IB/core: Enforce PKey security on QPs
 * drivers/infiniband/core/security.c
 */
1 /*
2  * Copyright (c) 2016 Mellanox Technologies Ltd.  All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #ifdef CONFIG_SECURITY_INFINIBAND
34
35 #include <linux/security.h>
36 #include <linux/completion.h>
37 #include <linux/list.h>
38
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_cache.h>
41 #include "core_priv.h"
42
43 static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp)
44 {
45         struct pkey_index_qp_list *pkey = NULL;
46         struct pkey_index_qp_list *tmp_pkey;
47         struct ib_device *dev = pp->sec->dev;
48
49         spin_lock(&dev->port_pkey_list[pp->port_num].list_lock);
50         list_for_each_entry(tmp_pkey,
51                             &dev->port_pkey_list[pp->port_num].pkey_list,
52                             pkey_index_list) {
53                 if (tmp_pkey->pkey_index == pp->pkey_index) {
54                         pkey = tmp_pkey;
55                         break;
56                 }
57         }
58         spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock);
59         return pkey;
60 }
61
62 static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp,
63                                       u16 *pkey,
64                                       u64 *subnet_prefix)
65 {
66         struct ib_device *dev = pp->sec->dev;
67         int ret;
68
69         ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey);
70         if (ret)
71                 return ret;
72
73         ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix);
74
75         return ret;
76 }
77
78 static int enforce_qp_pkey_security(u16 pkey,
79                                     u64 subnet_prefix,
80                                     struct ib_qp_security *qp_sec)
81 {
82         struct ib_qp_security *shared_qp_sec;
83         int ret;
84
85         ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey);
86         if (ret)
87                 return ret;
88
89         if (qp_sec->qp == qp_sec->qp->real_qp) {
90                 list_for_each_entry(shared_qp_sec,
91                                     &qp_sec->shared_qp_list,
92                                     shared_qp_list) {
93                         ret = security_ib_pkey_access(shared_qp_sec->security,
94                                                       subnet_prefix,
95                                                       pkey);
96                         if (ret)
97                                 return ret;
98                 }
99         }
100         return 0;
101 }
102
103 /* The caller of this function must hold the QP security
104  * mutex of the QP of the security structure in *pps.
105  *
106  * It takes separate ports_pkeys and security structure
107  * because in some cases the pps will be for a new settings
108  * or the pps will be for the real QP and security structure
109  * will be for a shared QP.
110  */
111 static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps,
112                                        struct ib_qp_security *sec)
113 {
114         u64 subnet_prefix;
115         u16 pkey;
116         int ret = 0;
117
118         if (!pps)
119                 return 0;
120
121         if (pps->main.state != IB_PORT_PKEY_NOT_VALID) {
122                 get_pkey_and_subnet_prefix(&pps->main,
123                                            &pkey,
124                                            &subnet_prefix);
125
126                 ret = enforce_qp_pkey_security(pkey,
127                                                subnet_prefix,
128                                                sec);
129         }
130         if (ret)
131                 return ret;
132
133         if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) {
134                 get_pkey_and_subnet_prefix(&pps->alt,
135                                            &pkey,
136                                            &subnet_prefix);
137
138                 ret = enforce_qp_pkey_security(pkey,
139                                                subnet_prefix,
140                                                sec);
141         }
142
143         return ret;
144 }
145
/* The caller of this function must hold the QP security
 * mutex.
 *
 * Force the QP into the error state because its PKey access is no
 * longer permitted, then deliver an IB_EVENT_QP_FATAL event to the
 * QP and to every QP sharing it.
 */
static void qp_to_error(struct ib_qp_security *sec)
{
        struct ib_qp_security *shared_qp_sec;
        struct ib_qp_attr attr = {
                .qp_state = IB_QPS_ERR
        };
        struct ib_event event = {
                .event = IB_EVENT_QP_FATAL
        };

        /* If the QP is in the process of being destroyed
         * the qp pointer in the security structure is
         * undefined.  It cannot be modified now.
         */
        if (sec->destroying)
                return;

        /* Modify-to-error result is intentionally ignored: there is no
         * recovery path here and the event below is sent regardless.
         */
        ib_modify_qp(sec->qp,
                     &attr,
                     IB_QP_STATE);

        if (sec->qp->event_handler && sec->qp->qp_context) {
                event.element.qp = sec->qp;
                sec->qp->event_handler(&event,
                                       sec->qp->qp_context);
        }

        /* Notify every shared QP as well; each may have its own
         * handler/context and device.
         */
        list_for_each_entry(shared_qp_sec,
                            &sec->shared_qp_list,
                            shared_qp_list) {
                struct ib_qp *qp = shared_qp_sec->qp;

                if (qp->event_handler && qp->qp_context) {
                        event.element.qp = qp;
                        event.device = qp->device;
                        qp->event_handler(&event,
                                          qp->qp_context);
                }
        }
}
189
/* Re-validate every QP registered under this PKey index against the
 * currently cached PKey value, sending offenders to the error state.
 *
 * Offending QPs are first collected on a local to_error_list under
 * the qp_list spinlock and transitioned afterwards under their own
 * security mutexes (mutex_lock cannot be taken under a spinlock).
 */
static inline void check_pkey_qps(struct pkey_index_qp_list *pkey,
                                  struct ib_device *device,
                                  u8 port_num,
                                  u64 subnet_prefix)
{
        struct ib_port_pkey *pp, *tmp_pp;
        bool comp;
        LIST_HEAD(to_error_list);
        u16 pkey_val;

        if (!ib_get_cached_pkey(device,
                                port_num,
                                pkey->pkey_index,
                                &pkey_val)) {
                spin_lock(&pkey->qp_list_lock);
                list_for_each_entry(pp, &pkey->qp_list, qp_list) {
                        /* Skip entries already claimed for an error
                         * transition by another walker.
                         */
                        if (atomic_read(&pp->sec->error_list_count))
                                continue;

                        if (enforce_qp_pkey_security(pkey_val,
                                                     subnet_prefix,
                                                     pp->sec)) {
                                atomic_inc(&pp->sec->error_list_count);
                                list_add(&pp->to_error_list,
                                         &to_error_list);
                        }
                }
                spin_unlock(&pkey->qp_list_lock);
        }

        list_for_each_entry_safe(pp,
                                 tmp_pp,
                                 &to_error_list,
                                 to_error_list) {
                mutex_lock(&pp->sec->mutex);
                qp_to_error(pp->sec);
                list_del(&pp->to_error_list);
                atomic_dec(&pp->sec->error_list_count);
                /* A QP mid-destroy is blocked in a wait_for_completion
                 * on error_complete; signal it after releasing the
                 * mutex so it can proceed.
                 */
                comp = pp->sec->destroying;
                mutex_unlock(&pp->sec->mutex);

                if (comp)
                        complete(&pp->sec->error_complete);
        }
}
235
/* The caller of this function must hold the QP security
 * mutex.
 *
 * Add pp to the per-port list of QPs using its PKey index, creating
 * the pkey_index_qp_list bucket if it does not exist yet.  Uses the
 * classic allocate-then-recheck pattern so the allocation happens
 * outside the spinlock.  Returns 0 or -ENOMEM.
 */
static int port_pkey_list_insert(struct ib_port_pkey *pp)
{
        struct pkey_index_qp_list *tmp_pkey;
        struct pkey_index_qp_list *pkey;
        struct ib_device *dev;
        u8 port_num = pp->port_num;
        int ret = 0;

        /* Only settings in the VALID state get listed. */
        if (pp->state != IB_PORT_PKEY_VALID)
                return 0;

        dev = pp->sec->dev;

        pkey = get_pkey_idx_qp_list(pp);

        if (!pkey) {
                bool found = false;

                /* Allocate before taking the lock; GFP_KERNEL may sleep. */
                pkey = kzalloc(sizeof(*pkey), GFP_KERNEL);
                if (!pkey)
                        return -ENOMEM;

                spin_lock(&dev->port_pkey_list[port_num].list_lock);
                /* Check for the PKey again.  A racing process may
                 * have created it.
                 */
                list_for_each_entry(tmp_pkey,
                                    &dev->port_pkey_list[port_num].pkey_list,
                                    pkey_index_list) {
                        if (tmp_pkey->pkey_index == pp->pkey_index) {
                                kfree(pkey);
                                pkey = tmp_pkey;
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        pkey->pkey_index = pp->pkey_index;
                        spin_lock_init(&pkey->qp_list_lock);
                        INIT_LIST_HEAD(&pkey->qp_list);
                        list_add(&pkey->pkey_index_list,
                                 &dev->port_pkey_list[port_num].pkey_list);
                }
                spin_unlock(&dev->port_pkey_list[port_num].list_lock);
        }

        spin_lock(&pkey->qp_list_lock);
        list_add(&pp->qp_list, &pkey->qp_list);
        spin_unlock(&pkey->qp_list_lock);

        pp->state = IB_PORT_PKEY_LISTED;

        return ret;
}
294
295 /* The caller of this function must hold the QP security
296  * mutex.
297  */
298 static void port_pkey_list_remove(struct ib_port_pkey *pp)
299 {
300         struct pkey_index_qp_list *pkey;
301
302         if (pp->state != IB_PORT_PKEY_LISTED)
303                 return;
304
305         pkey = get_pkey_idx_qp_list(pp);
306
307         spin_lock(&pkey->qp_list_lock);
308         list_del(&pp->qp_list);
309         spin_unlock(&pkey->qp_list_lock);
310
311         /* The setting may still be valid, i.e. after
312          * a destroy has failed for example.
313          */
314         pp->state = IB_PORT_PKEY_VALID;
315 }
316
317 static void destroy_qp_security(struct ib_qp_security *sec)
318 {
319         security_ib_free_security(sec->security);
320         kfree(sec->ports_pkeys);
321         kfree(sec);
322 }
323
324 /* The caller of this function must hold the QP security
325  * mutex.
326  */
327 static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
328                                           const struct ib_qp_attr *qp_attr,
329                                           int qp_attr_mask)
330 {
331         struct ib_ports_pkeys *new_pps;
332         struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys;
333
334         new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL);
335         if (!new_pps)
336                 return NULL;
337
338         if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
339                 if (!qp_pps) {
340                         new_pps->main.port_num = qp_attr->port_num;
341                         new_pps->main.pkey_index = qp_attr->pkey_index;
342                 } else {
343                         new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
344                                                   qp_attr->port_num :
345                                                   qp_pps->main.port_num;
346
347                         new_pps->main.pkey_index =
348                                         (qp_attr_mask & IB_QP_PKEY_INDEX) ?
349                                          qp_attr->pkey_index :
350                                          qp_pps->main.pkey_index;
351                 }
352                 new_pps->main.state = IB_PORT_PKEY_VALID;
353         } else if (qp_pps) {
354                 new_pps->main.port_num = qp_pps->main.port_num;
355                 new_pps->main.pkey_index = qp_pps->main.pkey_index;
356                 if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
357                         new_pps->main.state = IB_PORT_PKEY_VALID;
358         }
359
360         if (qp_attr_mask & IB_QP_ALT_PATH) {
361                 new_pps->alt.port_num = qp_attr->alt_port_num;
362                 new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
363                 new_pps->alt.state = IB_PORT_PKEY_VALID;
364         } else if (qp_pps) {
365                 new_pps->alt.port_num = qp_pps->alt.port_num;
366                 new_pps->alt.pkey_index = qp_pps->alt.pkey_index;
367                 if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID)
368                         new_pps->alt.state = IB_PORT_PKEY_VALID;
369         }
370
371         new_pps->main.sec = qp->qp_sec;
372         new_pps->alt.sec = qp->qp_sec;
373         return new_pps;
374 }
375
/* Create the security structure for a QP opened via ib_open_qp() and
 * verify it may use the real QP's current port/PKey settings.  On
 * success a shared QP is linked onto the real QP's shared_qp_list so
 * later PKey changes re-check it too; on failure the newly created
 * security structure is freed.  Returns 0 or a negative errno.
 */
int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev)
{
        struct ib_qp *real_qp = qp->real_qp;
        int ret;

        ret = ib_create_qp_security(qp, dev);

        if (ret)
                return ret;

        mutex_lock(&real_qp->qp_sec->mutex);
        ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys,
                                          qp->qp_sec);

        if (ret)
                goto ret;

        /* Only genuinely shared QPs get linked; a QP that is its own
         * real QP must not appear on its own list.
         */
        if (qp != real_qp)
                list_add(&qp->qp_sec->shared_qp_list,
                         &real_qp->qp_sec->shared_qp_list);
ret:
        mutex_unlock(&real_qp->qp_sec->mutex);
        /* destroy_qp_security() after dropping the real QP's mutex. */
        if (ret)
                destroy_qp_security(qp->qp_sec);

        return ret;
}
403
404 void ib_close_shared_qp_security(struct ib_qp_security *sec)
405 {
406         struct ib_qp *real_qp = sec->qp->real_qp;
407
408         mutex_lock(&real_qp->qp_sec->mutex);
409         list_del(&sec->shared_qp_list);
410         mutex_unlock(&real_qp->qp_sec->mutex);
411
412         destroy_qp_security(sec);
413 }
414
415 int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
416 {
417         int ret;
418
419         qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL);
420         if (!qp->qp_sec)
421                 return -ENOMEM;
422
423         qp->qp_sec->qp = qp;
424         qp->qp_sec->dev = dev;
425         mutex_init(&qp->qp_sec->mutex);
426         INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list);
427         atomic_set(&qp->qp_sec->error_list_count, 0);
428         init_completion(&qp->qp_sec->error_complete);
429         ret = security_ib_alloc_security(&qp->qp_sec->security);
430         if (ret)
431                 kfree(qp->qp_sec);
432
433         return ret;
434 }
435 EXPORT_SYMBOL(ib_create_qp_security);
436
/* First stage of QP destruction: unlist the QP's port/PKey settings,
 * mark the structure as being destroyed, and snapshot how many
 * cache-change walkers still reference it (each one will signal
 * error_complete when done).
 */
void ib_destroy_qp_security_begin(struct ib_qp_security *sec)
{
        mutex_lock(&sec->mutex);

        /* Remove the QP from the lists so it won't get added to
         * a to_error_list during the destroy process.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_remove(&sec->ports_pkeys->main);
                port_pkey_list_remove(&sec->ports_pkeys->alt);
        }

        /* If the QP is already in one or more of those lists
         * the destroying flag will ensure the to error flow
         * doesn't operate on an undefined QP.
         */
        sec->destroying = true;

        /* Record the error list count to know how many completions
         * to wait for.
         */
        sec->error_comps_pending = atomic_read(&sec->error_list_count);

        mutex_unlock(&sec->mutex);
}
462
/* Undo ib_destroy_qp_security_begin() after a failed destroy: wait
 * out any in-flight cache-change walkers, clear the destroying flag,
 * relist the port/PKey settings, and re-verify access (sending the
 * QP to error if it is no longer permitted).
 */
void ib_destroy_qp_security_abort(struct ib_qp_security *sec)
{
        int ret;
        int i;

        /* If a concurrent cache update is in progress this
         * QP security could be marked for an error state
         * transition.  Wait for this to complete.
         */
        for (i = 0; i < sec->error_comps_pending; i++)
                wait_for_completion(&sec->error_complete);

        mutex_lock(&sec->mutex);
        sec->destroying = false;

        /* Restore the position in the lists and verify
         * access is still allowed in case a cache update
         * occurred while attempting to destroy.
         *
         * Because these setting were listed already
         * and removed during ib_destroy_qp_security_begin
         * we know the pkey_index_qp_list for the PKey
         * already exists so port_pkey_list_insert won't fail.
         */
        if (sec->ports_pkeys) {
                port_pkey_list_insert(&sec->ports_pkeys->main);
                port_pkey_list_insert(&sec->ports_pkeys->alt);
        }

        ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec);
        if (ret)
                qp_to_error(sec);

        mutex_unlock(&sec->mutex);
}
498
499 void ib_destroy_qp_security_end(struct ib_qp_security *sec)
500 {
501         int i;
502
503         /* If a concurrent cache update is occurring we must
504          * wait until this QP security structure is processed
505          * in the QP to error flow before destroying it because
506          * the to_error_list is in use.
507          */
508         for (i = 0; i < sec->error_comps_pending; i++)
509                 wait_for_completion(&sec->error_complete);
510
511         destroy_qp_security(sec);
512 }
513
/* A PKey table change was observed on port_num: re-check every QP
 * registered under each PKey index on that port and send those that
 * no longer pass the LSM check to the error state.
 *
 * NOTE(review): pkey_list is walked here without the port's
 * list_lock, while port_pkey_list_insert() adds entries under that
 * lock -- confirm the caller serializes against concurrent inserts
 * or that lockless traversal is intended here.
 */
void ib_security_cache_change(struct ib_device *device,
                              u8 port_num,
                              u64 subnet_prefix)
{
        struct pkey_index_qp_list *pkey;

        list_for_each_entry(pkey,
                            &device->port_pkey_list[port_num].pkey_list,
                            pkey_index_list) {
                check_pkey_qps(pkey,
                               device,
                               port_num,
                               subnet_prefix);
        }
}
529
530 void ib_security_destroy_port_pkey_list(struct ib_device *device)
531 {
532         struct pkey_index_qp_list *pkey, *tmp_pkey;
533         int i;
534
535         for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
536                 spin_lock(&device->port_pkey_list[i].list_lock);
537                 list_for_each_entry_safe(pkey,
538                                          tmp_pkey,
539                                          &device->port_pkey_list[i].pkey_list,
540                                          pkey_index_list) {
541                         list_del(&pkey->pkey_index_list);
542                         kfree(pkey);
543                 }
544                 spin_unlock(&device->port_pkey_list[i].list_lock);
545         }
546 }
547
548 int ib_security_modify_qp(struct ib_qp *qp,
549                           struct ib_qp_attr *qp_attr,
550                           int qp_attr_mask,
551                           struct ib_udata *udata)
552 {
553         int ret = 0;
554         struct ib_ports_pkeys *tmp_pps;
555         struct ib_ports_pkeys *new_pps;
556         bool special_qp = (qp->qp_type == IB_QPT_SMI ||
557                            qp->qp_type == IB_QPT_GSI ||
558                            qp->qp_type >= IB_QPT_RESERVED1);
559         bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) ||
560                            (qp_attr_mask & IB_QP_ALT_PATH));
561
562         if (pps_change && !special_qp) {
563                 mutex_lock(&qp->qp_sec->mutex);
564                 new_pps = get_new_pps(qp,
565                                       qp_attr,
566                                       qp_attr_mask);
567
568                 /* Add this QP to the lists for the new port
569                  * and pkey settings before checking for permission
570                  * in case there is a concurrent cache update
571                  * occurring.  Walking the list for a cache change
572                  * doesn't acquire the security mutex unless it's
573                  * sending the QP to error.
574                  */
575                 ret = port_pkey_list_insert(&new_pps->main);
576
577                 if (!ret)
578                         ret = port_pkey_list_insert(&new_pps->alt);
579
580                 if (!ret)
581                         ret = check_qp_port_pkey_settings(new_pps,
582                                                           qp->qp_sec);
583         }
584
585         if (!ret)
586                 ret = qp->device->modify_qp(qp->real_qp,
587                                             qp_attr,
588                                             qp_attr_mask,
589                                             udata);
590
591         if (pps_change && !special_qp) {
592                 /* Clean up the lists and free the appropriate
593                  * ports_pkeys structure.
594                  */
595                 if (ret) {
596                         tmp_pps = new_pps;
597                 } else {
598                         tmp_pps = qp->qp_sec->ports_pkeys;
599                         qp->qp_sec->ports_pkeys = new_pps;
600                 }
601
602                 if (tmp_pps) {
603                         port_pkey_list_remove(&tmp_pps->main);
604                         port_pkey_list_remove(&tmp_pps->alt);
605                 }
606                 kfree(tmp_pps);
607                 mutex_unlock(&qp->qp_sec->mutex);
608         }
609         return ret;
610 }
611 EXPORT_SYMBOL(ib_security_modify_qp);
612
613 #endif /* CONFIG_SECURITY_INFINIBAND */