#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/rtas.h>

#include "cxl.h"
#include "hcalls.h"

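/* operations passed to transfer_image() by the two flash ioctls */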
#define DOWNLOAD_IMAGE 1
#define VALIDATE_IMAGE 2

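/*
 * Header prepended to the first data chunk when userspace sets
 * CXL_AI_NEED_HEADER: it identifies the adapter (vendor/device/subsystem
 * IDs) and gives the offset and length of the image that follows.
 */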
struct ai_header {
	u16 version;
	u8 reserved0[6];
	u16 vendor;
	u16 device;
	u16 subsystem_vendor;
	u16 subsystem;
	u64 image_offset;
	u64 image_length;
	u8 reserved1[96];
};

static struct semaphore sem;
static unsigned long *buffer[CXL_AI_MAX_ENTRIES];
static struct sg_list *le;
static u64 continue_token;
static unsigned int transfer;

struct update_props_workarea {
	__be32 phandle;
	__be32 state;
	__be64 reserved;
	__be32 nprops;
} __packed;

struct update_nodes_workarea {
	__be32 state;
	__be64 unit_address;
	__be32 reserved;
} __packed;

#define DEVICE_SCOPE 3
#define NODE_ACTION_MASK 0xff000000
#define NODE_COUNT_MASK 0x00ffffff
#define OPCODE_DELETE 0x01000000
#define OPCODE_UPDATE 0x02000000
#define OPCODE_ADD 0x03000000

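/*
 * Wrapper around rtas_call(): the caller's work area is staged through
 * the globally shared rtas_data_buf under rtas_data_buf_lock, and the
 * (possibly updated) buffer is copied back to the caller afterwards.
 */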
static int rcall(int token, char *buf, s32 scope)
{
	int rc;

	spin_lock(&rtas_data_buf_lock);

	memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
	memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);

	spin_unlock(&rtas_data_buf_lock);
	return rc;
}

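/*
 * Build a new property ('vd' bytes copied from 'value') and hand it to
 * cxl_update_properties(); all allocations are freed again if the
 * update fails.
 */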
static int update_property(struct device_node *dn, const char *name,
			   u32 vd, char *value)
{
	struct property *new_prop;
	u32 *val;
	int rc;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return -ENOMEM;

	new_prop->name = kstrdup(name, GFP_KERNEL);
	if (!new_prop->name) {
		kfree(new_prop);
		return -ENOMEM;
	}

	new_prop->length = vd;
	new_prop->value = kzalloc(new_prop->length, GFP_KERNEL);
	if (!new_prop->value) {
		kfree(new_prop->name);
		kfree(new_prop);
		return -ENOMEM;
	}
	memcpy(new_prop->value, value, vd);

	val = (u32 *)new_prop->value;
	rc = cxl_update_properties(dn, new_prop);
	pr_devel("%s: update property (%s, length: %i, value: %#x)\n",
		 dn->name, name, vd, be32_to_cpu(*val));

	if (rc) {
		kfree(new_prop->name);
		kfree(new_prop->value);
		kfree(new_prop);
	}
	return rc;
}

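/*
 * Pull the updated properties of one node from the hypervisor via the
 * ibm,update-properties RTAS call and apply them locally. The call is
 * repeated while it returns 1, i.e. while more property data remains.
 */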
static int update_node(__be32 phandle, s32 scope)
{
	struct update_props_workarea *upwa;
	struct device_node *dn;
	int i, rc, ret;
	char *prop_data;
	char *buf;
	int token;
	u32 nprops;
	u32 vd;

	token = rtas_token("ibm,update-properties");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	dn = of_find_node_by_phandle(be32_to_cpu(phandle));
	if (!dn) {
		kfree(buf);
		return -ENOENT;
	}

	upwa = (struct update_props_workarea *)&buf[0];
	upwa->phandle = phandle;
	do {
		rc = rcall(token, buf, scope);
		if (rc < 0)
			break;

		prop_data = buf + sizeof(*upwa);
		nprops = be32_to_cpu(upwa->nprops);

		if (*prop_data == 0) {
			prop_data++;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += vd + sizeof(vd);
			nprops--;
		}

		for (i = 0; i < nprops; i++) {
			char *prop_name;

			prop_name = prop_data;
			prop_data += strlen(prop_name) + 1;
			vd = be32_to_cpu(*(__be32 *)prop_data);
			prop_data += sizeof(vd);

			if ((vd != 0x00000000) && (vd != 0x80000000)) {
				ret = update_property(dn, prop_name, vd,
						      prop_data);
				if (ret)
					pr_err("cxl: Could not update property %s - %i\n",
					       prop_name, ret);

				prop_data += vd;
			}
		}
	} while (rc == 1);

	of_node_put(dn);
	kfree(buf);
	return rc;
}

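/*
 * Ask firmware which device tree nodes changed for this adapter
 * (ibm,update-nodes) and refresh the updated ones; deleted nodes are
 * ignored and added nodes only advance the work-area pointer here.
 */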
static int update_devicetree(struct cxl *adapter, s32 scope)
{
	struct update_nodes_workarea *unwa;
	u32 action, node_count;
	int token, rc, i;
	__be32 *data, drc_index, phandle;
	char *buf;

	token = rtas_token("ibm,update-nodes");
	if (token == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	unwa = (struct update_nodes_workarea *)&buf[0];
	unwa->unit_address = cpu_to_be64(adapter->guest->handle);
	do {
		rc = rcall(token, buf, scope);
		if (rc && rc != 1)
			break;

		data = (__be32 *)buf + 4;
		while (be32_to_cpu(*data) & NODE_ACTION_MASK) {
			action = be32_to_cpu(*data) & NODE_ACTION_MASK;
			node_count = be32_to_cpu(*data) & NODE_COUNT_MASK;
			pr_devel("device reconfiguration - action: %#x, nodes: %#x\n",
				 action, node_count);
			data++;

			for (i = 0; i < node_count; i++) {
				phandle = *data++;

				switch (action) {
				case OPCODE_DELETE:
					/* nothing to do */
					break;
				case OPCODE_UPDATE:
					update_node(phandle, scope);
					break;
				case OPCODE_ADD:
					/* nothing to do, just move pointer */
					drc_index = *data++;
					break;
				}
			}
		}
	} while (rc == 1);

	kfree(buf);
	return 0;
}

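/*
 * Chop the user-supplied image into CXL_AI_BUFFER_SIZE chunks,
 * optionally prepending the generated adapter image header to the
 * first chunk, build the scatter/gather list 'le' describing the
 * chunks and pass it to the download or validate hcall (fct).
 * continue_token carries the hypervisor's state across successive
 * calls and is cleared once an operation completes successfully.
 */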
static int handle_image(struct cxl *adapter, int operation,
			long (*fct)(u64, u64, u64, u64 *),
			struct cxl_adapter_image *ai)
{
	size_t mod, s_copy, len_chunk = 0;
	struct ai_header *header = NULL;
	unsigned int entries = 0, i;
	void *dest, *from;
	int rc = 0, need_header;

	/* base adapter image header */
	need_header = (ai->flags & CXL_AI_NEED_HEADER);
	if (need_header) {
		header = kzalloc(sizeof(struct ai_header), GFP_KERNEL);
		if (!header)
			return -ENOMEM;
		header->version = cpu_to_be16(1);
		header->vendor = cpu_to_be16(adapter->guest->vendor);
		header->device = cpu_to_be16(adapter->guest->device);
		header->subsystem_vendor = cpu_to_be16(adapter->guest->subsystem_vendor);
		header->subsystem = cpu_to_be16(adapter->guest->subsystem);
		header->image_offset = cpu_to_be64(CXL_AI_HEADER_SIZE);
		header->image_length = cpu_to_be64(ai->len_image);
	}

	/* number of entries in the list */
	len_chunk = ai->len_data;
	if (need_header)
		len_chunk += CXL_AI_HEADER_SIZE;

	entries = len_chunk / CXL_AI_BUFFER_SIZE;
	mod = len_chunk % CXL_AI_BUFFER_SIZE;
	if (mod)
		entries++;

	if (entries > CXL_AI_MAX_ENTRIES) {
		rc = -EINVAL;
		goto err;
	}

	/*          <-- MAX_CHUNK_SIZE = 4096 * 256 = 1048576 bytes -->
	 * chunk 0  ----------------------------------------------------
	 *          | header   |  data                                  |
	 *          ----------------------------------------------------
	 * chunk 1  ----------------------------------------------------
	 *          | data                                              |
	 *          ----------------------------------------------------
	 * ....
	 * chunk n  ----------------------------------------------------
	 *          | data                                              |
	 *          ----------------------------------------------------
	 */
	from = (void *) ai->data;
	for (i = 0; i < entries; i++) {
		dest = buffer[i];
		s_copy = CXL_AI_BUFFER_SIZE;

		if ((need_header) && (i == 0)) {
			/* add adapter image header */
			memcpy(buffer[i], header, sizeof(struct ai_header));
			s_copy = CXL_AI_BUFFER_SIZE - CXL_AI_HEADER_SIZE;
			dest += CXL_AI_HEADER_SIZE; /* image offset */
		}
		if ((i == (entries - 1)) && mod)
			s_copy = mod;

		/* copy data */
		if (copy_from_user(dest, from, s_copy)) {
			rc = -EFAULT;
			goto err;
		}

		/* fill in the list */
		le[i].phys_addr = cpu_to_be64(virt_to_phys(buffer[i]));
		le[i].len = cpu_to_be64(CXL_AI_BUFFER_SIZE);
		if ((i == (entries - 1)) && mod)
			le[i].len = cpu_to_be64(mod);
		from += s_copy;
	}
	pr_devel("%s (op: %i, need header: %i, entries: %i, token: %#llx)\n",
		 __func__, operation, need_header, entries, continue_token);

	/*
	 * download/validate the adapter image to the coherent
	 * platform facility
	 */
	rc = fct(adapter->guest->handle, virt_to_phys(le), entries,
		 &continue_token);
	if (rc == 0) /* success of download/validation operation */
		continue_token = 0;

err:
	kfree(header);

	return rc;
}

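/*
 * Dispatch a download or validate request. A failed operation resets
 * the adapter; a fully successful validation removes the AFUs, resets
 * the adapter, refreshes the device tree from the hypervisor and flags
 * the transfer as complete so device_close() reloads the driver.
 */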
static int transfer_image(struct cxl *adapter, int operation,
			  struct cxl_adapter_image *ai)
{
	int rc = 0;
	int afu;

	switch (operation) {
	case DOWNLOAD_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_download_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
		}
		return rc;

	case VALIDATE_IMAGE:
		rc = handle_image(adapter, operation,
				  &cxl_h_validate_adapter_image, ai);
		if (rc < 0) {
			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);
			return rc;
		}
		if (rc == 0) {
			pr_devel("remove current afu\n");
			for (afu = 0; afu < adapter->slices; afu++)
				cxl_guest_remove_afu(adapter->afu[afu]);

			pr_devel("resetting adapter\n");
			cxl_h_reset_adapter(adapter->guest->handle);

			/*
			 * The entire image has now been downloaded and the
			 * validation has been successfully performed.
			 * After that, the partition should call
			 * ibm,update-nodes and ibm,update-properties to
			 * receive the current configuration.
			 */
			rc = update_devicetree(adapter, DEVICE_SCOPE);
			transfer = 1;
		}
		return rc;
	}

	return -EINVAL;
}

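/*
 * Copy the ioctl argument from user space and reject it if any
 * reserved field or unknown flag bit is set before starting the
 * transfer.
 */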
static long ioctl_transfer_image(struct cxl *adapter, int operation,
				 struct cxl_adapter_image __user *uai)
{
	struct cxl_adapter_image ai;

	pr_devel("%s\n", __func__);

	if (copy_from_user(&ai, uai, sizeof(struct cxl_adapter_image)))
		return -EFAULT;

	/*
	 * Make sure reserved fields and bits are set to 0
	 */
	if (ai.reserved1 || ai.reserved2 || ai.reserved3 || ai.reserved4 ||
	    (ai.flags & ~CXL_AI_ALL))
		return -EINVAL;

	return transfer_image(adapter, operation, &ai);
}

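/*
 * Only one process at a time may drive a flash operation: open takes
 * the semaphore (released again in device_close or on an open error)
 * and allocates one page for the scatter/gather list plus one page per
 * possible list entry.
 */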
static int device_open(struct inode *inode, struct file *file)
{
	int adapter_num = CXL_DEVT_ADAPTER(inode->i_rdev);
	struct cxl *adapter;
	int rc = 0, i;

	pr_devel("in %s\n", __func__);

	BUG_ON(sizeof(struct ai_header) != CXL_AI_HEADER_SIZE);

	/* Allows one process to open the device by using a semaphore */
	if (down_interruptible(&sem) != 0)
		return -EPERM;

	if (!(adapter = get_cxl_adapter(adapter_num))) {
		up(&sem);
		return -ENODEV;
	}

	file->private_data = adapter;
	continue_token = 0;
	transfer = 0;

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++)
		buffer[i] = NULL;

	/* aligned buffer containing list entries which describe up to
	 * 1 megabyte of data (256 entries of 4096 bytes each)
	 *  Logical real address of buffer 0  -  Buffer 0 length in bytes
	 *  Logical real address of buffer 1  -  Buffer 1 length in bytes
	 *  Logical real address of buffer 2  -  Buffer 2 length in bytes
	 *  ....
	 *  ....
	 *  Logical real address of buffer N  -  Buffer N length in bytes
	 */
	le = (struct sg_list *)get_zeroed_page(GFP_KERNEL);
	if (!le) {
		rc = -ENOMEM;
		goto err;
	}

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		buffer[i] = (unsigned long *)get_zeroed_page(GFP_KERNEL);
		if (!buffer[i]) {
			rc = -ENOMEM;
			goto err1;
		}
	}

	return 0;

err1:
	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);
err:
	put_device(&adapter->dev);
	up(&sem);

	return rc;
}

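/*
 * Dispatch the two flash ioctls. As a rough, non-authoritative sketch
 * of how userspace might drive this interface (the device path and the
 * single-shot transfer below are illustrative assumptions, not part of
 * this file):
 *
 *	struct cxl_adapter_image ai = { 0 };
 *	int fd = open("/dev/cxl/card0", O_RDWR);
 *
 *	ai.flags = CXL_AI_NEED_HEADER;     // only if the image has no header
 *	ai.data = (__u64)(unsigned long)image_buf;
 *	ai.len_data = image_size;          // bytes handed over in this call
 *	ai.len_image = image_size;         // total image size
 *	ioctl(fd, CXL_IOCTL_DOWNLOAD_IMAGE, &ai);
 *	ioctl(fd, CXL_IOCTL_VALIDATE_IMAGE, &ai);
 *	close(fd);                         // triggers reset or module reload
 */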
static long device_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct cxl *adapter = file->private_data;

	pr_devel("in %s\n", __func__);

	if (cmd == CXL_IOCTL_DOWNLOAD_IMAGE)
		return ioctl_transfer_image(adapter,
					DOWNLOAD_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else if (cmd == CXL_IOCTL_VALIDATE_IMAGE)
		return ioctl_transfer_image(adapter,
					VALIDATE_IMAGE,
					(struct cxl_adapter_image __user *)arg);
	else
		return -EINVAL;
}

static long device_compat_ioctl(struct file *file, unsigned int cmd,
				unsigned long arg)
{
	return device_ioctl(file, cmd, arg);
}

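/*
 * Free the pages allocated in device_open, release the semaphore and
 * finish the operation: after a completed transfer the cxl module is
 * reloaded to pick up the new configuration, otherwise the adapter is
 * simply reset.
 */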
static int device_close(struct inode *inode, struct file *file)
{
	struct cxl *adapter = file->private_data;
	int i;

	pr_devel("in %s\n", __func__);

	for (i = 0; i < CXL_AI_MAX_ENTRIES; i++) {
		if (buffer[i])
			free_page((unsigned long) buffer[i]);
	}

	if (le)
		free_page((unsigned long) le);

	up(&sem);
	put_device(&adapter->dev);
	continue_token = 0;

	/* reload the module */
	if (transfer)
		cxl_guest_reload_module(adapter);
	else {
		pr_devel("resetting adapter\n");
		cxl_h_reset_adapter(adapter->guest->handle);
	}

	transfer = 0;
	return 0;
}

static const struct file_operations fops = {
	.owner		= THIS_MODULE,
	.open		= device_open,
	.unlocked_ioctl	= device_ioctl,
	.compat_ioctl	= device_compat_ioctl,
	.release	= device_close,
};

void cxl_guest_remove_chardev(struct cxl *adapter)
{
	cdev_del(&adapter->guest->cdev);
}

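/*
 * Register the adapter's flash character device and initialise the
 * single-owner semaphore used by device_open().
 */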
int cxl_guest_add_chardev(struct cxl *adapter)
{
	dev_t devt;
	int rc;

	devt = MKDEV(MAJOR(cxl_get_dev()), CXL_CARD_MINOR(adapter));
	cdev_init(&adapter->guest->cdev, &fops);
	if ((rc = cdev_add(&adapter->guest->cdev, devt, 1))) {
		dev_err(&adapter->dev,
			"Unable to add chardev on adapter (card%i): %i\n",
			adapter->adapter_num, rc);
		goto err;
	}
	adapter->dev.devt = devt;
	sema_init(&sem, 1);
err:
	return rc;
}