/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

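/*
 * Global list of per NUMA node affinity state, protected by
 * node_affinity.lock.
 */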
struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

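/*
 * A cpu_mask_set tracks CPU allocations from a pool: 'mask' is the
 * pool, 'used' marks the CPUs already handed out, and 'gen' counts how
 * many times the whole pool has been consumed and the 'used' map reset
 * (the current overload generation).
 */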
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask. Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1. Skip over the first N HT siblings and use them as the
	 * "real" cores. Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

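/*
 * Initialize global affinity state: the process CPU pool, the non-HT
 * "real" CPU mask, and the per NUMA node count of HFI devices found by
 * walking hfi1_pci_tbl.
 */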
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
		cpumask_weight(topology_sibling_cpumask(
			cpumask_first(&node_affinity.proc.mask)));
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}

void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

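/*
 * Allocate an affinity node entry for the given NUMA node. The caller
 * links it into node_affinity.list while holding node_affinity.lock.
 */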
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * Append an entry to the global affinity list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

/*
 * Interrupt affinity.
 *
 * Non-receive-available interrupts get a default mask that starts as
 * the possible CPUs, with HT siblings and each CPU assigned to a
 * receive-available interrupt removed.
 *
 * Receive-available interrupts get node-relative CPU 1 onward,
 * wrapping back to node-relative CPU 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			mutex_unlock(&node_affinity.lock);
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		node_affinity_add_tail(entry);
	}
	mutex_unlock(&node_affinity.lock);
	return 0;
}

/*
 * Set the IRQ affinity for an MSI-X vector.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	int ret;
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		free_cpumask_var(diff);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
	}

	switch (msix->type) {
	case IRQ_SDMA:
		sde->cpu = cpu;
		break;
	case IRQ_GENERAL:
	case IRQ_RCVCTXT:
	case IRQ_OTHER:
		break;
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
		    msix->msix.vector, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);

	free_cpumask_var(diff);
	return 0;
}

int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}

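/*
 * Undo hfi1_get_irq_affinity(): return the vector's CPU to its
 * cpu_mask_set (unless it is a general or control context vector) and
 * clear the IRQ affinity hint.
 */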
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
	}

	irq_set_affinity_hint(msix->msix.vector, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}

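/*
 * Illustrative example: with 2 online NUMA nodes, 4 cores per socket,
 * and 2 HW threads per core (16 online CPUs, first siblings enumerated
 * before second siblings), num_cores_per_socket is 16 / 2 / 2 = 4. The
 * first 4 * 2 = 8 bits of the process mask are kept and then shifted
 * left by 8 * hw_thread_no, so hw_thread_no 0 selects the first HW
 * thread of every physical core and hw_thread_no 1 the second.
 */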
/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}

int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = tsk_cpus_allowed(current);
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT core,
	 *     and, so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * If NUMA node has CPUs used by interrupt handlers, include them in the
	 * interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

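/*
 * Return a CPU previously handed out by hfi1_get_proc_affinity() to
 * the process CPU pool.
 */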
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	mutex_unlock(&affinity->lock);
}

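/*
 * Parse a CPU list from 'buf', validate it against the online CPUs,
 * make it the new SDMA interrupt mask for this device's NUMA node, and
 * reassign every SDMA MSI-X vector from that mask.
 */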
int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf,
			   size_t count)
{
	struct hfi1_affinity_node *entry;
	cpumask_var_t mask;
	int ret, i;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	if (!entry) {
		ret = -EINVAL;
		goto unlock;
	}

	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = cpulist_parse(buf, mask);
	if (ret)
		goto out;

	if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) {
		dd_dev_warn(dd, "Invalid CPU mask\n");
		ret = -EINVAL;
		goto out;
	}

	/* reset the SDMA interrupt affinity details */
	init_cpu_mask_set(&entry->def_intr);
	cpumask_copy(&entry->def_intr.mask, mask);

	/* Reassign the affinity for each SDMA interrupt. */
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *msix;

		msix = &dd->msix_entries[i];
		if (msix->type != IRQ_SDMA)
			continue;

		ret = get_irq_affinity(dd, msix);

		if (ret)
			break;
	}
out:
	free_cpumask_var(mask);
unlock:
	mutex_unlock(&node_affinity.lock);
	return ret ? ret : strnlen(buf, PAGE_SIZE);
}

int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf)
{
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	if (!entry) {
		mutex_unlock(&node_affinity.lock);
		return -EINVAL;
	}

	cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask);
	mutex_unlock(&node_affinity.lock);
	return strnlen(buf, PAGE_SIZE);
}