// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 */

#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
        .list = LIST_HEAD_INIT(node_affinity.list),
        .lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
        "SDMA",
        "RCVCTXT",
        "GENERAL",
        "OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
        cpumask_clear(&set->mask);
        cpumask_clear(&set->used);
        set->gen = 0;
}

/* Increment generation of CPU set if needed */
static void _cpu_mask_set_gen_inc(struct cpu_mask_set *set)
{
        if (cpumask_equal(&set->mask, &set->used)) {
                /*
                 * We've used up all the CPUs, bump up the generation
                 * and reset the 'used' map
                 */
                set->gen++;
                cpumask_clear(&set->used);
        }
}

/* Decrement generation of CPU set if needed */
static void _cpu_mask_set_gen_dec(struct cpu_mask_set *set)
{
        if (cpumask_empty(&set->used) && set->gen) {
                set->gen--;
                cpumask_copy(&set->used, &set->mask);
        }
}

/* Get the first CPU from the list of unused CPUs in a CPU set data structure */
static int cpu_mask_set_get_first(struct cpu_mask_set *set, cpumask_var_t diff)
{
        int cpu;

        if (!diff || !set)
                return -EINVAL;

        _cpu_mask_set_gen_inc(set);

        /* Find out CPUs left in CPU mask */
        cpumask_andnot(diff, &set->mask, &set->used);

        cpu = cpumask_first(diff);
        if (cpu >= nr_cpu_ids) /* empty */
                cpu = -EINVAL;
        else
                cpumask_set_cpu(cpu, &set->used);

        return cpu;
}

static void cpu_mask_set_put(struct cpu_mask_set *set, int cpu)
{
        if (!set)
                return;

        cpumask_clear_cpu(cpu, &set->used);
        _cpu_mask_set_gen_dec(set);
}

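/*
 * Summary of the cpu_mask_set helpers above: 'mask' is the pool of CPUs a
 * set may hand out and 'used' tracks the CPUs currently claimed.  The 'gen'
 * counter lets the pool wrap: once every CPU in 'mask' is in 'used', the
 * generation is bumped and 'used' is cleared so allocation starts over
 * (overloading CPUs), and releasing the last CPU of a wrapped generation
 * decrements 'gen' and marks the previous generation's CPUs as used again.
 */
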
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
        int possible, curr_cpu, i, ht;

        cpumask_clear(&node_affinity.real_cpu_mask);

        /* Start with cpu online mask as the real cpu mask */
        cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

        /*
         * Remove HT cores from the real cpu mask.  Do this in two steps below.
         */
        possible = cpumask_weight(&node_affinity.real_cpu_mask);
        ht = cpumask_weight(topology_sibling_cpumask(
                                cpumask_first(&node_affinity.real_cpu_mask)));
        /*
         * Step 1.  Skip over the first N HT siblings and use them as the
         * "real" cores.  Assumes that HT cores are not enumerated in
         * succession (except in the single core case).
         */
        curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
        for (i = 0; i < possible / ht; i++)
                curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
        /*
         * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
         * skip any gaps.
         */
        for (; i < possible; i++) {
                cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
                curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
        }
}

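/*
 * Illustrative example (hypothetical enumeration): with 8 online CPUs and
 * 2 HT threads per core, where CPUs 0-3 are the first threads of each core
 * and CPUs 4-7 are their siblings, step 1 above skips over CPUs 0-3 and
 * step 2 clears CPUs 4-7, leaving real_cpu_mask = 0-3.
 */
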
int node_affinity_init(void)
{
        int node;
        struct pci_dev *dev = NULL;
        const struct pci_device_id *ids = hfi1_pci_tbl;

        cpumask_clear(&node_affinity.proc.used);
        cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

        node_affinity.proc.gen = 0;
        node_affinity.num_core_siblings =
                cpumask_weight(topology_sibling_cpumask(
                                        cpumask_first(&node_affinity.proc.mask)
                                        ));
        node_affinity.num_possible_nodes = num_possible_nodes();
        node_affinity.num_online_nodes = num_online_nodes();
        node_affinity.num_online_cpus = num_online_cpus();

        /*
         * The real cpu mask is part of the affinity struct but it has to be
         * initialized early. It is needed to calculate the number of user
         * contexts in set_up_context_variables().
         */
        init_real_cpu_mask();

        hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
                                     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
        if (!hfi1_per_node_cntr)
                return -ENOMEM;

        while (ids->vendor) {
                dev = NULL;
                while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
                        node = pcibus_to_node(dev->bus);
                        if (node < 0)
                                goto out;

                        hfi1_per_node_cntr[node]++;
                }
                ids++;
        }

        return 0;

out:
        /*
         * Invalid PCI NUMA node information found, note it, and populate
         * our database 1:1.
         */
        pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
        pr_err("HFI: System BIOS may need to be upgraded\n");
        for (node = 0; node < node_affinity.num_possible_nodes; node++)
                hfi1_per_node_cntr[node] = 1;

        return 0;
}

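/*
 * After node_affinity_init(), hfi1_per_node_cntr[] holds the number of HFI
 * devices found on each NUMA node; if the BIOS reports invalid NUMA
 * information for any device, the table falls back to one device per node
 * so later per-node divisions stay well defined.
 */
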
static void node_affinity_destroy(struct hfi1_affinity_node *entry)
{
        free_percpu(entry->comp_vect_affinity);
        kfree(entry);
}

void node_affinity_destroy_all(void)
{
        struct list_head *pos, *q;
        struct hfi1_affinity_node *entry;

        mutex_lock(&node_affinity.lock);
        list_for_each_safe(pos, q, &node_affinity.list) {
                entry = list_entry(pos, struct hfi1_affinity_node,
                                   list);
                list_del(pos);
                node_affinity_destroy(entry);
        }
        mutex_unlock(&node_affinity.lock);
        kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
        struct hfi1_affinity_node *entry;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;
        entry->node = node;
        entry->comp_vect_affinity = alloc_percpu(u16);
        INIT_LIST_HEAD(&entry->list);

        return entry;
}

/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
        list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
        struct list_head *pos;
        struct hfi1_affinity_node *entry;

        list_for_each(pos, &node_affinity.list) {
                entry = list_entry(pos, struct hfi1_affinity_node, list);
                if (entry->node == node)
                        return entry;
        }

        return NULL;
}

/*
 * Return the CPU in possible_cpumask with the lowest usage counter and
 * increment its counter.
 */
static int per_cpu_affinity_get(cpumask_var_t possible_cpumask,
                                u16 __percpu *comp_vect_affinity)
{
        int curr_cpu;
        u16 cntr;
        u16 prev_cntr;
        int ret_cpu;

        if (!possible_cpumask) {
                ret_cpu = -EINVAL;
                goto fail;
        }

        if (!comp_vect_affinity) {
                ret_cpu = -EINVAL;
                goto fail;
        }

        ret_cpu = cpumask_first(possible_cpumask);
        if (ret_cpu >= nr_cpu_ids) {
                ret_cpu = -EINVAL;
                goto fail;
        }

        prev_cntr = *per_cpu_ptr(comp_vect_affinity, ret_cpu);
        for_each_cpu(curr_cpu, possible_cpumask) {
                cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

                if (cntr < prev_cntr) {
                        ret_cpu = curr_cpu;
                        prev_cntr = cntr;
                }
        }

        *per_cpu_ptr(comp_vect_affinity, ret_cpu) += 1;

fail:
        return ret_cpu;
}

/*
 * Return the CPU in possible_cpumask with the highest usage counter and
 * decrement its counter.
 */
static int per_cpu_affinity_put_max(cpumask_var_t possible_cpumask,
                                    u16 __percpu *comp_vect_affinity)
{
        int curr_cpu;
        int max_cpu;
        u16 cntr;
        u16 prev_cntr;

        if (!possible_cpumask)
                return -EINVAL;

        if (!comp_vect_affinity)
                return -EINVAL;

        max_cpu = cpumask_first(possible_cpumask);
        if (max_cpu >= nr_cpu_ids)
                return -EINVAL;

        prev_cntr = *per_cpu_ptr(comp_vect_affinity, max_cpu);
        for_each_cpu(curr_cpu, possible_cpumask) {
                cntr = *per_cpu_ptr(comp_vect_affinity, curr_cpu);

                if (cntr > prev_cntr) {
                        max_cpu = curr_cpu;
                        prev_cntr = cntr;
                }
        }

        *per_cpu_ptr(comp_vect_affinity, max_cpu) -= 1;

        return max_cpu;
}

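/*
 * per_cpu_affinity_get() and per_cpu_affinity_put_max() act as a simple
 * per-CPU load balancer for completion vectors: get() picks the CPU in the
 * mask with the smallest usage counter and increments it, put_max() finds
 * the CPU with the largest counter and decrements it, so repeated get/put
 * pairs keep the counters roughly even across the mask.
 */
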
/*
 * Non-interrupt CPUs are used first, then interrupt CPUs.
 * Two already allocated cpu masks must be passed.
 */
static int _dev_comp_vect_cpu_get(struct hfi1_devdata *dd,
                                  struct hfi1_affinity_node *entry,
                                  cpumask_var_t non_intr_cpus,
                                  cpumask_var_t available_cpus)
        __must_hold(&node_affinity.lock)
{
        int cpu;
        struct cpu_mask_set *set = dd->comp_vect;

        lockdep_assert_held(&node_affinity.lock);
        if (!non_intr_cpus) {
                cpu = -1;
                goto fail;
        }

        if (!available_cpus) {
                cpu = -1;
                goto fail;
        }

        /* Available CPUs for pinning completion vectors */
        _cpu_mask_set_gen_inc(set);
        cpumask_andnot(available_cpus, &set->mask, &set->used);

        /* Available CPUs without SDMA engine interrupts */
        cpumask_andnot(non_intr_cpus, available_cpus,
                       &entry->def_intr.used);

        /* If there are non-interrupt CPUs available, use them first */
        if (!cpumask_empty(non_intr_cpus))
                cpu = cpumask_first(non_intr_cpus);
        else /* Otherwise, use interrupt CPUs */
                cpu = cpumask_first(available_cpus);

        if (cpu >= nr_cpu_ids) { /* empty */
                cpu = -1;
                goto fail;
        }
        cpumask_set_cpu(cpu, &set->used);

fail:
        return cpu;
}

static void _dev_comp_vect_cpu_put(struct hfi1_devdata *dd, int cpu)
{
        struct cpu_mask_set *set = dd->comp_vect;

        if (cpu < 0)
                return;

        cpu_mask_set_put(set, cpu);
}

/* _dev_comp_vect_mappings_destroy() is reentrant */
static void _dev_comp_vect_mappings_destroy(struct hfi1_devdata *dd)
{
        int i, cpu;

        if (!dd->comp_vect_mappings)
                return;

        for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
                cpu = dd->comp_vect_mappings[i];
                _dev_comp_vect_cpu_put(dd, cpu);
                dd->comp_vect_mappings[i] = -1;
                hfi1_cdbg(AFFINITY,
                          "[%s] Release CPU %d from completion vector %d",
                          rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), cpu, i);
        }

        kfree(dd->comp_vect_mappings);
        dd->comp_vect_mappings = NULL;
}

/*
 * This function creates the table for looking up CPUs for completion vectors.
 * num_comp_vectors needs to have been initialized before calling this function.
 */
static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
                                          struct hfi1_affinity_node *entry)
        __must_hold(&node_affinity.lock)
{
        int i, cpu, ret;
        cpumask_var_t non_intr_cpus;
        cpumask_var_t available_cpus;

        lockdep_assert_held(&node_affinity.lock);

        if (!zalloc_cpumask_var(&non_intr_cpus, GFP_KERNEL))
                return -ENOMEM;

        if (!zalloc_cpumask_var(&available_cpus, GFP_KERNEL)) {
                free_cpumask_var(non_intr_cpus);
                return -ENOMEM;
        }

        dd->comp_vect_mappings = kcalloc(dd->comp_vect_possible_cpus,
                                         sizeof(*dd->comp_vect_mappings),
                                         GFP_KERNEL);
        if (!dd->comp_vect_mappings) {
                ret = -ENOMEM;
                goto fail;
        }
        for (i = 0; i < dd->comp_vect_possible_cpus; i++)
                dd->comp_vect_mappings[i] = -1;

        for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
                cpu = _dev_comp_vect_cpu_get(dd, entry, non_intr_cpus,
                                             available_cpus);
                if (cpu < 0) {
                        ret = -EINVAL;
                        goto fail;
                }

                dd->comp_vect_mappings[i] = cpu;
                hfi1_cdbg(AFFINITY,
                          "[%s] Completion Vector %d -> CPU %d",
                          rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
        }

        free_cpumask_var(available_cpus);
        free_cpumask_var(non_intr_cpus);
        return 0;

fail:
        free_cpumask_var(available_cpus);
        free_cpumask_var(non_intr_cpus);
        _dev_comp_vect_mappings_destroy(dd);

        return ret;
}

int hfi1_comp_vectors_set_up(struct hfi1_devdata *dd)
{
        int ret;
        struct hfi1_affinity_node *entry;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);
        if (!entry) {
                ret = -EINVAL;
                goto unlock;
        }
        ret = _dev_comp_vect_mappings_create(dd, entry);
unlock:
        mutex_unlock(&node_affinity.lock);

        return ret;
}

void hfi1_comp_vectors_clean_up(struct hfi1_devdata *dd)
{
        _dev_comp_vect_mappings_destroy(dd);
}

int hfi1_comp_vect_mappings_lookup(struct rvt_dev_info *rdi, int comp_vect)
{
        struct hfi1_ibdev *verbs_dev = dev_from_rdi(rdi);
        struct hfi1_devdata *dd = dd_from_dev(verbs_dev);

        if (!dd->comp_vect_mappings)
                return -EINVAL;
        if (comp_vect >= dd->comp_vect_possible_cpus)
                return -EINVAL;

        return dd->comp_vect_mappings[comp_vect];
}

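/*
 * hfi1_comp_vect_mappings_lookup() is the read side of the table built by
 * _dev_comp_vect_mappings_create(): given a completion vector index it
 * returns the CPU that vector was pinned to, or -EINVAL if the table has
 * not been created or the index is out of range.
 */
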
/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static int _dev_comp_vect_cpu_mask_init(struct hfi1_devdata *dd,
                                        struct hfi1_affinity_node *entry,
                                        bool first_dev_init)
        __must_hold(&node_affinity.lock)
{
        int i, j, curr_cpu;
        int possible_cpus_comp_vect = 0;
        struct cpumask *dev_comp_vect_mask = &dd->comp_vect->mask;

        lockdep_assert_held(&node_affinity.lock);
        /*
         * If there's only one CPU available for completion vectors, then
         * there will only be one completion vector available. Otherwise,
         * the number of completion vectors available will be the number of
         * available CPUs divided by the number of devices in the local NUMA
         * node.
         */
        if (cpumask_weight(&entry->comp_vect_mask) == 1) {
                possible_cpus_comp_vect = 1;
                dd_dev_warn(dd,
                            "Number of kernel receive queues is too large for completion vector affinity to be effective\n");
        } else {
                possible_cpus_comp_vect +=
                        cpumask_weight(&entry->comp_vect_mask) /
                                       hfi1_per_node_cntr[dd->node];

                /*
                 * If the available completion vector CPUs don't divide
                 * evenly among devices, then the first device to be
                 * initialized gets an extra CPU.
                 */
                if (first_dev_init &&
                    cpumask_weight(&entry->comp_vect_mask) %
                    hfi1_per_node_cntr[dd->node] != 0)
                        possible_cpus_comp_vect++;
        }

        dd->comp_vect_possible_cpus = possible_cpus_comp_vect;

        /* Reserving CPUs for device completion vector */
        for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
                curr_cpu = per_cpu_affinity_get(&entry->comp_vect_mask,
                                                entry->comp_vect_affinity);
                if (curr_cpu < 0)
                        goto fail;

                cpumask_set_cpu(curr_cpu, dev_comp_vect_mask);
        }

        hfi1_cdbg(AFFINITY,
                  "[%s] Completion vector affinity CPU set(s) %*pbl",
                  rvt_get_ibdev_name(&(dd)->verbs_dev.rdi),
                  cpumask_pr_args(dev_comp_vect_mask));

        return 0;

fail:
        for (j = 0; j < i; j++)
                per_cpu_affinity_put_max(&entry->comp_vect_mask,
                                         entry->comp_vect_affinity);

        return curr_cpu;
}

/*
 * It assumes dd->comp_vect_possible_cpus is available.
 */
static void _dev_comp_vect_cpu_mask_clean_up(struct hfi1_devdata *dd,
                                             struct hfi1_affinity_node *entry)
        __must_hold(&node_affinity.lock)
{
        int i, cpu;

        lockdep_assert_held(&node_affinity.lock);
        if (!dd->comp_vect_possible_cpus)
                return;

        for (i = 0; i < dd->comp_vect_possible_cpus; i++) {
                cpu = per_cpu_affinity_put_max(&dd->comp_vect->mask,
                                               entry->comp_vect_affinity);
                /* Clearing CPU in device completion vector cpu mask */
                if (cpu >= 0)
                        cpumask_clear_cpu(cpu, &dd->comp_vect->mask);
        }

        dd->comp_vect_possible_cpus = 0;
}

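/*
 * _dev_comp_vect_cpu_mask_init() and _dev_comp_vect_cpu_mask_clean_up()
 * bracket a device's share of the node's completion vector CPUs: init
 * computes how many CPUs this device may claim (dividing the node's
 * comp_vect_mask among the devices on that node) and reserves them through
 * per_cpu_affinity_get(), while clean_up returns each reserved CPU through
 * per_cpu_affinity_put_max() and clears the device mask.
 */
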
/*
 * Interrupt affinity.
 *
 * Non-receive interrupts get a default mask that starts as the node's
 * non-HT CPUs with the general/control CPU and each receive interrupt
 * CPU removed.
 *
 * Receive interrupts get node-relative CPU 1 onward, wrapping back to
 * node-relative CPU 1 as necessary.
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
        int node = pcibus_to_node(dd->pcidev->bus);
        struct hfi1_affinity_node *entry;
        const struct cpumask *local_mask;
        int curr_cpu, possible, i, ret;
        bool new_entry = false;

        /*
         * If the BIOS does not have the NUMA node information set, select
         * NUMA 0 so we get consistent performance.
         */
        if (node < 0) {
                dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
                node = 0;
        }
        dd->node = node;

        local_mask = cpumask_of_node(dd->node);
        if (cpumask_first(local_mask) >= nr_cpu_ids)
                local_mask = topology_core_cpumask(0);

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);

        /*
         * If this is the first time this NUMA node's affinity is used,
         * create an entry in the global affinity structure and initialize it.
         */
        if (!entry) {
                entry = node_affinity_allocate(node);
                if (!entry) {
                        dd_dev_err(dd,
                                   "Unable to allocate global affinity node\n");
                        ret = -ENOMEM;
                        goto fail;
                }
                new_entry = true;

                init_cpu_mask_set(&entry->def_intr);
                init_cpu_mask_set(&entry->rcv_intr);
                cpumask_clear(&entry->comp_vect_mask);
                cpumask_clear(&entry->general_intr_mask);
                /* Use the "real" cpu mask of this node as the default */
                cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
                            local_mask);

                /* fill in the receive list */
                possible = cpumask_weight(&entry->def_intr.mask);
                curr_cpu = cpumask_first(&entry->def_intr.mask);

                if (possible == 1) {
                        /* only one CPU, everyone will use it */
                        cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
                        cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
                } else {
                        /*
                         * The general/control context will be the first CPU in
                         * the default list, so it is removed from the default
                         * list and added to the general interrupt list.
                         */
                        cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
                        cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
                        curr_cpu = cpumask_next(curr_cpu,
                                                &entry->def_intr.mask);

                        /*
                         * Remove the remaining kernel receive queues from
                         * the default list and add them to the receive list.
                         */
                        for (i = 0;
                             i < (dd->n_krcv_queues - 1) *
                                  hfi1_per_node_cntr[dd->node];
                             i++) {
                                cpumask_clear_cpu(curr_cpu,
                                                  &entry->def_intr.mask);
                                cpumask_set_cpu(curr_cpu,
                                                &entry->rcv_intr.mask);
                                curr_cpu = cpumask_next(curr_cpu,
                                                        &entry->def_intr.mask);
                                if (curr_cpu >= nr_cpu_ids)
                                        break;
                        }

                        /*
                         * If there ends up being 0 CPU cores leftover for SDMA
                         * engines, use the same CPU cores as general/control
                         * context.
                         */
                        if (cpumask_weight(&entry->def_intr.mask) == 0)
                                cpumask_copy(&entry->def_intr.mask,
                                             &entry->general_intr_mask);
                }

                /* Determine completion vector CPUs for the entire node */
                cpumask_and(&entry->comp_vect_mask,
                            &node_affinity.real_cpu_mask, local_mask);
                cpumask_andnot(&entry->comp_vect_mask,
                               &entry->comp_vect_mask,
                               &entry->rcv_intr.mask);
                cpumask_andnot(&entry->comp_vect_mask,
                               &entry->comp_vect_mask,
                               &entry->general_intr_mask);

                /*
                 * If there ends up being 0 CPU cores leftover for completion
                 * vectors, use the same CPU core as the general/control
                 * context.
                 */
                if (cpumask_weight(&entry->comp_vect_mask) == 0)
                        cpumask_copy(&entry->comp_vect_mask,
                                     &entry->general_intr_mask);
        }

        ret = _dev_comp_vect_cpu_mask_init(dd, entry, new_entry);
        if (ret < 0)
                goto fail;

        if (new_entry)
                node_affinity_add_tail(entry);

        mutex_unlock(&node_affinity.lock);

        return 0;

fail:
        if (new_entry)
                node_affinity_destroy(entry);
        mutex_unlock(&node_affinity.lock);
        return ret;
}

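/*
 * On return from hfi1_dev_affinity_init(), a node's entry partitions the
 * node's non-HT CPUs as follows: general_intr_mask holds the single CPU for
 * the general/control context, rcv_intr.mask holds the CPUs for the
 * remaining kernel receive queues, def_intr.mask holds whatever is left for
 * SDMA engines, and comp_vect_mask holds the CPUs not used for receive or
 * general interrupts (falling back to the general/control CPU whenever a
 * set would otherwise be empty).
 */
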
void hfi1_dev_affinity_clean_up(struct hfi1_devdata *dd)
{
        struct hfi1_affinity_node *entry;

        if (dd->node < 0)
                return;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;

        /*
         * Free device completion vector CPUs to be used by future
         * completion vectors
         */
        _dev_comp_vect_cpu_mask_clean_up(dd, entry);
unlock:
        mutex_unlock(&node_affinity.lock);
        dd->node = -1;
}

/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
        struct sdma_engine *sde = msix->arg;
        struct hfi1_devdata *dd = sde->dd;
        struct hfi1_affinity_node *entry;
        struct cpu_mask_set *set;
        int i, old_cpu;

        if (cpu > num_online_cpus() || cpu == sde->cpu)
                return;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);
        if (!entry)
                goto unlock;

        old_cpu = sde->cpu;
        sde->cpu = cpu;
        cpumask_clear(&msix->mask);
        cpumask_set_cpu(cpu, &msix->mask);
        dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
                   msix->irq, irq_type_names[msix->type],
                   sde->this_idx, cpu);
        irq_set_affinity_hint(msix->irq, &msix->mask);

        /*
         * Set the new cpu in the hfi1_affinity_node and clean
         * the old cpu if it is not used by any other IRQ
         */
        set = &entry->def_intr;
        cpumask_set_cpu(cpu, &set->mask);
        cpumask_set_cpu(cpu, &set->used);
        for (i = 0; i < dd->num_msix_entries; i++) {
                struct hfi1_msix_entry *other_msix;

                other_msix = &dd->msix_entries[i];
                if (other_msix->type != IRQ_SDMA || other_msix == msix)
                        continue;

                if (cpumask_test_cpu(old_cpu, &other_msix->mask))
                        goto unlock;
        }
        cpumask_clear_cpu(old_cpu, &set->mask);
        cpumask_clear_cpu(old_cpu, &set->used);
unlock:
        mutex_unlock(&node_affinity.lock);
}

static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        int cpu = cpumask_first(mask);
        struct hfi1_msix_entry *msix = container_of(notify,
                                                    struct hfi1_msix_entry,
                                                    notify);

        /* Only one CPU configuration supported currently */
        hfi1_update_sdma_affinity(msix, cpu);
}

static void hfi1_irq_notifier_release(struct kref *ref)
{
        /*
         * This is required by affinity notifier. We don't have anything to
         * free here.
         */
}

static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
        struct irq_affinity_notify *notify = &msix->notify;

        notify->irq = msix->irq;
        notify->notify = hfi1_irq_notifier_notify;
        notify->release = hfi1_irq_notifier_release;

        if (irq_set_affinity_notifier(notify->irq, notify))
                pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
                       notify->irq);
}

static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
        struct irq_affinity_notify *notify = &msix->notify;

        if (irq_set_affinity_notifier(notify->irq, NULL))
                pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
                       notify->irq);
}

/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
                            struct hfi1_msix_entry *msix)
{
        cpumask_var_t diff;
        struct hfi1_affinity_node *entry;
        struct cpu_mask_set *set = NULL;
        struct sdma_engine *sde = NULL;
        struct hfi1_ctxtdata *rcd = NULL;
        char extra[64];
        int cpu = -1;

        extra[0] = '\0';
        cpumask_clear(&msix->mask);

        entry = node_affinity_lookup(dd->node);

        switch (msix->type) {
        case IRQ_SDMA:
                sde = (struct sdma_engine *)msix->arg;
                scnprintf(extra, 64, "engine %u", sde->this_idx);
                set = &entry->def_intr;
                break;
        case IRQ_GENERAL:
                cpu = cpumask_first(&entry->general_intr_mask);
                break;
        case IRQ_RCVCTXT:
                rcd = (struct hfi1_ctxtdata *)msix->arg;
                if (rcd->ctxt == HFI1_CTRL_CTXT)
                        cpu = cpumask_first(&entry->general_intr_mask);
                else
                        set = &entry->rcv_intr;
                scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
                break;
        default:
                dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
                return -EINVAL;
        }

        /*
         * The general and control contexts are placed on a particular
         * CPU, which is set above. Skip accounting for it. Everything else
         * finds its CPU here.
         */
        if (cpu == -1 && set) {
                if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
                        return -ENOMEM;

                cpu = cpu_mask_set_get_first(set, diff);
                if (cpu < 0) {
                        free_cpumask_var(diff);
                        dd_dev_err(dd, "Failure to obtain CPU for IRQ\n");
                        return cpu;
                }

                free_cpumask_var(diff);
        }

        cpumask_set_cpu(cpu, &msix->mask);
        dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
                    msix->irq, irq_type_names[msix->type],
                    extra, cpu);
        irq_set_affinity_hint(msix->irq, &msix->mask);

        if (msix->type == IRQ_SDMA) {
                sde->cpu = cpu;
                hfi1_setup_sdma_notifier(msix);
        }

        return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
        int ret;

        mutex_lock(&node_affinity.lock);
        ret = get_irq_affinity(dd, msix);
        mutex_unlock(&node_affinity.lock);
        return ret;
}

void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
                           struct hfi1_msix_entry *msix)
{
        struct cpu_mask_set *set = NULL;
        struct hfi1_ctxtdata *rcd;
        struct hfi1_affinity_node *entry;

        mutex_lock(&node_affinity.lock);
        entry = node_affinity_lookup(dd->node);

        switch (msix->type) {
        case IRQ_SDMA:
                set = &entry->def_intr;
                hfi1_cleanup_sdma_notifier(msix);
                break;
        case IRQ_GENERAL:
                /* Don't do accounting for general contexts */
                break;
        case IRQ_RCVCTXT:
                rcd = (struct hfi1_ctxtdata *)msix->arg;
                /* Don't do accounting for control contexts */
                if (rcd->ctxt != HFI1_CTRL_CTXT)
                        set = &entry->rcv_intr;
                break;
        default:
                mutex_unlock(&node_affinity.lock);
                return;
        }

        if (set) {
                cpumask_andnot(&set->used, &set->used, &msix->mask);
                _cpu_mask_set_gen_dec(set);
        }

        irq_set_affinity_hint(msix->irq, NULL);
        cpumask_clear(&msix->mask);
        mutex_unlock(&node_affinity.lock);
}

/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
                                struct hfi1_affinity_node_list *affinity)
{
        int possible, curr_cpu, i;
        uint num_cores_per_socket = node_affinity.num_online_cpus /
                                        affinity->num_core_siblings /
                                                node_affinity.num_online_nodes;

        cpumask_copy(hw_thread_mask, &affinity->proc.mask);
        if (affinity->num_core_siblings > 0) {
                /* Removing other siblings not needed for now */
                possible = cpumask_weight(hw_thread_mask);
                curr_cpu = cpumask_first(hw_thread_mask);
                for (i = 0;
                     i < num_cores_per_socket * node_affinity.num_online_nodes;
                     i++)
                        curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

                for (; i < possible; i++) {
                        cpumask_clear_cpu(curr_cpu, hw_thread_mask);
                        curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
                }

                /* Identifying correct HW threads within physical cores */
                cpumask_shift_left(hw_thread_mask, hw_thread_mask,
                                   num_cores_per_socket *
                                   node_affinity.num_online_nodes *
                                   hw_thread_no);
        }
}

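/*
 * Illustrative example (hypothetical topology): with 2 online NUMA nodes,
 * 2 HW threads per core, and 8 online CPUs, num_cores_per_socket is
 * 8 / 2 / 2 = 2.  find_hw_thread_mask() first keeps the lowest
 * 2 * 2 = 4 CPUs of the process mask, then shifts that mask left by
 * 4 * hw_thread_no, so hw_thread_no 0 selects the first HW thread of every
 * core and hw_thread_no 1 selects its sibling.  This assumes the first
 * threads of all cores are enumerated before the sibling threads.
 */
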
int hfi1_get_proc_affinity(int node)
{
        int cpu = -1, ret, i;
        struct hfi1_affinity_node *entry;
        cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
        const struct cpumask *node_mask,
                *proc_mask = &current->cpus_allowed;
        struct hfi1_affinity_node_list *affinity = &node_affinity;
        struct cpu_mask_set *set = &affinity->proc;

        /*
         * check whether process/context affinity has already
         * been set
         */
        if (cpumask_weight(proc_mask) == 1) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
                /*
                 * Mark the pre-set CPU as used. This is atomic so we don't
                 * need the lock
                 */
                cpu = cpumask_first(proc_mask);
                cpumask_set_cpu(cpu, &set->used);
                goto done;
        } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
                hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
                          current->pid, current->comm,
                          cpumask_pr_args(proc_mask));
                goto done;
        }
        /*
         * The process does not have a preset CPU affinity so find one to
         * recommend using the following algorithm:
         *
         * For each user process that is opening a context on HFI Y:
         *  a) If all cores are filled, reinitialize the bitmask
         *  b) Fill real cores first, then HT cores (First set of HT
         *     cores on all physical cores, then second set of HT core,
         *     and, so on) in the following order:
         *
         *     1. Same NUMA node as HFI Y and not running an IRQ
         *        handler
         *     2. Same NUMA node as HFI Y and running an IRQ handler
         *     3. Different NUMA node to HFI Y and not running an IRQ
         *        handler
         *     4. Different NUMA node to HFI Y and running an IRQ
         *        handler
         *  c) Mark core as filled in the bitmask. As user processes are
         *     done, clear cores from the bitmask.
         */
        ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
        if (!ret)
                goto done;
        ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
        if (!ret)
                goto free_diff;
        ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
        if (!ret)
                goto free_hw_thread_mask;
        ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
        if (!ret)
                goto free_available_mask;
        mutex_lock(&affinity->lock);
        /*
         * If we've used all available HW threads, clear the mask and start
         * overloading.
         */
        _cpu_mask_set_gen_inc(set);

        /*
         * If NUMA node has CPUs used by interrupt handlers, include them in the
         * interrupt handler mask.
         */
        entry = node_affinity_lookup(node);
        if (entry) {
                cpumask_copy(intrs_mask, (entry->def_intr.gen ?
                                          &entry->def_intr.mask :
                                          &entry->def_intr.used));
                cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
                                                    &entry->rcv_intr.mask :
                                                    &entry->rcv_intr.used));
                cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
        }
        hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
                  cpumask_pr_args(intrs_mask));
        cpumask_copy(hw_thread_mask, &set->mask);

        /*
         * If HT cores are enabled, identify which HW threads within the
         * physical cores should be used.
         */
        if (affinity->num_core_siblings > 0) {
                for (i = 0; i < affinity->num_core_siblings; i++) {
                        find_hw_thread_mask(i, hw_thread_mask, affinity);

                        /*
                         * If there's at least one available core for this HW
                         * thread number, stop looking for a core.
                         *
                         * diff will always be non-empty at least once in this
                         * loop because the used mask gets reset when
                         * (set->mask == set->used) before this loop.
                         */
                        cpumask_andnot(diff, hw_thread_mask, &set->used);
                        if (!cpumask_empty(diff))
                                break;
                }
        }
        hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
                  cpumask_pr_args(hw_thread_mask));
        node_mask = cpumask_of_node(node);
        hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
                  cpumask_pr_args(node_mask));

        /* Get cpumask of available CPUs on preferred NUMA */
        cpumask_and(available_mask, hw_thread_mask, node_mask);
        cpumask_andnot(available_mask, available_mask, &set->used);
        hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
                  cpumask_pr_args(available_mask));
        /*
         * At first, we don't want to place processes on the same
         * CPUs as interrupt handlers. Then, CPUs running interrupt
         * handlers are used.
         *
         * 1) If diff is not empty, then there are CPUs not running
         *    interrupt handlers available, so diff gets copied
         *    over to available_mask.
         * 2) If diff is empty, then all CPUs not running interrupt
         *    handlers are taken, so available_mask contains all
         *    available CPUs running interrupt handlers.
         * 3) If available_mask is empty, then all CPUs on the
         *    preferred NUMA node are taken, so other NUMA nodes are
         *    used for process assignments using the same method as
         *    the preferred NUMA node.
         */
        cpumask_andnot(diff, available_mask, intrs_mask);
        if (!cpumask_empty(diff))
                cpumask_copy(available_mask, diff);
        /* If we don't have CPUs on the preferred node, use other NUMA nodes */
        if (cpumask_empty(available_mask)) {
                cpumask_andnot(available_mask, hw_thread_mask, &set->used);
                /* Excluding preferred NUMA cores */
                cpumask_andnot(available_mask, available_mask, node_mask);
                hfi1_cdbg(PROC,
                          "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
                          cpumask_pr_args(available_mask));

                /*
                 * At first, we don't want to place processes on the same
                 * CPUs as interrupt handlers.
                 */
                cpumask_andnot(diff, available_mask, intrs_mask);
                if (!cpumask_empty(diff))
                        cpumask_copy(available_mask, diff);
        }
        hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
                  cpumask_pr_args(available_mask));

        cpu = cpumask_first(available_mask);
        if (cpu >= nr_cpu_ids) /* empty */
                cpu = -1;
        else
                cpumask_set_cpu(cpu, &set->used);

        mutex_unlock(&affinity->lock);
        hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);
        free_cpumask_var(intrs_mask);
free_available_mask:
        free_cpumask_var(available_mask);
free_hw_thread_mask:
        free_cpumask_var(hw_thread_mask);
free_diff:
        free_cpumask_var(diff);
done:
        return cpu;
}

void hfi1_put_proc_affinity(int cpu)
{
        struct hfi1_affinity_node_list *affinity = &node_affinity;
        struct cpu_mask_set *set = &affinity->proc;

        if (cpu < 0)
                return;

        mutex_lock(&affinity->lock);
        cpu_mask_set_put(set, cpu);
        hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
        mutex_unlock(&affinity->lock);
}