/*
 * Copyright(c) 2015, 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>

#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"

struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __SPIN_LOCK_UNLOCKED(&node_affinity.lock),
};

/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;

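/*
 * A cpu_mask_set tracks allocation from a pool of CPUs: 'mask' holds the
 * CPUs available, 'used' holds the CPUs already handed out, and 'gen'
 * counts how many times the set has been exhausted and restarted from an
 * empty 'used' mask (i.e. how many consumers are stacked on each CPU).
 */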
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}

/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask. Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1. Skip over the first N HT siblings and use them as the
	 * "real" cores. Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2. Remove the remaining HT siblings. Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}

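/*
 * Set up the driver-global affinity state that must exist before any
 * device is probed: the per-process CPU pool, the non-HT "real" CPU
 * mask, and a count of HFI devices on each NUMA node (used later to
 * size each node's receive-interrupt CPU list).
 */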
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

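	/*
	 * Walk every (vendor, device) pair in the driver's PCI ID table and
	 * count how many matching devices sit on each NUMA node. A device
	 * with no node information is credited to the current CPU's node.
	 */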
	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}

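/*
 * Tear down the global affinity state: free every per-node entry on the
 * affinity list (added by hfi1_dev_affinity_init()) and the per-node
 * device counters (allocated by node_affinity_init()).
 */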
void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	spin_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}

static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}

/*
 * Append an entry to the global affinity node list.
 * Must be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}

/* Must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}

/*
 * Interrupt affinity.
 *
 * Non-receive interrupts (SDMA, general) get a default mask that starts
 * as the node's non-HT CPUs, minus the CPUs handed to the general
 * interrupt and to the receive contexts.
 *
 * Receive-context interrupts get node-relative CPUs, wrapping back to
 * the first node-relative CPU as necessary.
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		spin_lock(&node_affinity.lock);
		node_affinity_add_tail(entry);
		spin_unlock(&node_affinity.lock);
	}

	return 0;
}

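/*
 * Pick a CPU for an MSI-X vector based on its IRQ type, record it in
 * msix->mask, and publish it via irq_set_affinity_hint(). SDMA and
 * kernel receive-context vectors draw round-robin from their node's
 * cpu_mask_set; general/control-context vectors always land on the
 * node's dedicated general-interrupt CPU.
 */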
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		/* don't leak the scratch cpumask on the error path */
		free_cpumask_var(diff);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		spin_lock(&node_affinity.lock);
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
		spin_unlock(&node_affinity.lock);
	}

	switch (msix->type) {
	case IRQ_SDMA:
		sde->cpu = cpu;
		break;
	case IRQ_GENERAL:
	case IRQ_RCVCTXT:
	case IRQ_OTHER:
		break;
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ vector: %u, type %s %s -> cpu: %d\n",
		    msix->msix.vector, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->msix.vector, &msix->mask);

	free_cpumask_var(diff);
	return 0;
}

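/*
 * Undo hfi1_get_irq_affinity(): return the vector's CPU to its
 * cpu_mask_set (general/control CPUs are never accounted, so there is
 * nothing to return for them) and clear the affinity hint.
 */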
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	spin_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	spin_unlock(&node_affinity.lock);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		return;
	}

	if (set) {
		spin_lock(&node_affinity.lock);
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
		spin_unlock(&node_affinity.lock);
	}

	irq_set_affinity_hint(msix->msix.vector, NULL);
	cpumask_clear(&msix->mask);
}

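/*
 * Recommend a CPU for a user process. If the process already has an
 * affinity set, honor it; otherwise prefer an unused CPU on the
 * requested (or the device's) NUMA node, avoiding CPUs that service
 * interrupts when possible. Returns the chosen CPU, or -1 if none is
 * available.
 */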
int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
{
	int cpu = -1, ret;
	cpumask_var_t diff, mask, intrs;
	struct hfi1_affinity_node *entry;
	const struct cpumask *node_mask,
		*proc_mask = tsk_cpus_allowed(current);
	struct cpu_mask_set *set = &node_affinity.proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend. We prefer CPUs on the same NUMA node as the device.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&intrs, GFP_KERNEL);
	if (!ret)
		goto free_mask;

	spin_lock(&node_affinity.lock);
	/*
	 * If we've used all available CPUs, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * Gather the CPUs used by interrupt handlers on this NUMA node. If
	 * an interrupt set has wrapped (gen > 0), every CPU in its mask is
	 * in use; otherwise only the CPUs in its 'used' mask are.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs, (entry->def_intr.gen ?
				     &entry->def_intr.mask :
				     &entry->def_intr.used));
		cpumask_or(intrs, intrs, (entry->rcv_intr.gen ?
					  &entry->rcv_intr.mask :
					  &entry->rcv_intr.used));
		cpumask_or(intrs, intrs, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs));

	/*
	 * If we don't have a NUMA node requested, preference is towards
	 * device NUMA node
	 */
	if (node == -1)
		node = dd->node;
	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* diff will hold all unused cpus */
	cpumask_andnot(diff, &set->mask, &set->used);
	hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff));

	/* get cpumask of available CPUs on preferred NUMA */
	cpumask_and(mask, diff, node_mask);
	hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers.
	 */
	cpumask_andnot(diff, mask, intrs);
	if (!cpumask_empty(diff))
		cpumask_copy(mask, diff);

	/*
	 * if we don't have a cpu on the preferred NUMA, get
	 * the list of the remaining available CPUs
	 */
	if (cpumask_empty(mask)) {
		cpumask_andnot(diff, &set->mask, &set->used);
		cpumask_andnot(mask, diff, node_mask);
	}
	hfi1_cdbg(PROC, "possible CPUs for process %*pbl",
		  cpumask_pr_args(mask));

	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);
	spin_unlock(&node_affinity.lock);

	free_cpumask_var(intrs);
free_mask:
	free_cpumask_var(mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}

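/*
 * Release a CPU previously handed out by hfi1_get_proc_affinity(). When
 * the last CPU of a wrapped generation is returned, step the generation
 * back and mark the whole mask used again on behalf of the still-active
 * older generation.
 */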
void hfi1_put_proc_affinity(struct hfi1_devdata *dd, int cpu)
{
	struct cpu_mask_set *set = &node_affinity.proc;

	if (cpu < 0)
		return;
	spin_lock(&node_affinity.lock);
	cpumask_clear_cpu(cpu, &set->used);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	spin_unlock(&node_affinity.lock);
}