/*
 * channel program interfaces
 *
 * Copyright IBM Corp. 2017
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vfio.h>
#include <asm/idals.h>

#include "vfio_ccw_cp.h"

/*
 * Max length for ccw chain.
 * XXX: Limit to 256, need to check more?
 */
#define CCWCHAIN_LEN_MAX	256

struct pfn_array {
        unsigned long           pa_iova;
        unsigned long           *pa_iova_pfn;
        unsigned long           *pa_pfn;
        int                     pa_nr;
};

struct pfn_array_table {
        struct pfn_array        *pat_pa;
        int                     pat_nr;
};

struct ccwchain {
        struct list_head        next;
        struct ccw1             *ch_ccw;
        /* Guest physical address of the current chain. */
        u64                     ch_iova;
        /* Count of the valid ccws in chain. */
        int                     ch_len;
        /* Pinned PAGEs for the original data. */
        struct pfn_array_table  *ch_pat;
};

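/*
 * Overview of the data structures in this file (descriptive note only):
 *
 * A channel_program keeps a list of ccwchain objects, one per contiguous
 * run of ccws copied from the guest (the head chain plus one chain per
 * distinct TIC target). Each ccwchain carries an array of ch_len ccw1
 * entries (the host copy of the guest ccws) and a parallel array of
 * pfn_array_table entries, one per ccw, recording the guest pages that
 * were pinned for that ccw's data so they can be unpinned on cp_free().
 */
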
/*
 * pfn_array_pin() - pin user pages in memory
 * @pa: pfn_array on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 *
 * Attempt to pin user pages in memory.
 *
 * Usage of pfn_array:
 * @pa->pa_iova     starting guest physical I/O address. Assigned by caller.
 * @pa->pa_iova_pfn array that stores PFNs of the pages that need to be
 *                  pinned. Allocated by caller.
 * @pa->pa_pfn      array that receives PFNs of the pages pinned. Allocated
 *                  by caller.
 * @pa->pa_nr       number of pages from @pa->pa_iova to pin. Assigned by
 *                  caller.
 *                  On return: number of pages actually pinned. Assigned by
 *                  callee.
 *
 * Returns:
 *   Number of pages pinned on success.
 *   If @pa->pa_nr is 0 or negative, returns 0.
 *   If no pages were pinned, returns -errno.
 */
static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
{
        int i, ret;

        if (pa->pa_nr <= 0) {
                pa->pa_nr = 0;
                return 0;
        }

        pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
        for (i = 1; i < pa->pa_nr; i++)
                pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;

        ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
                             IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);

        if (ret > 0 && ret != pa->pa_nr) {
                vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
                pa->pa_nr = 0;
                return 0;
        }

        return ret;
}

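/*
 * Pinning example (descriptive, assuming 4K pages, PAGE_SHIFT == 12):
 * for pa_iova == 0x2100 and pa_nr == 3, the loop above fills
 * pa_iova_pfn with the consecutive guest PFNs {0x2, 0x3, 0x4}, and
 * vfio_pin_pages() stores the corresponding host PFNs in pa_pfn.
 * A partial pin (0 < ret < pa_nr) is rolled back and treated as
 * "nothing pinned".
 */
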
/* Unpin the pages before releasing the memory. */
static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
{
        vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
        pa->pa_nr = 0;
        kfree(pa->pa_iova_pfn);
}

/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
                               u64 iova, unsigned int len)
{
        int ret = 0;

        if (!len)
                return 0;

        if (pa->pa_nr)
                return -EINVAL;

        pa->pa_iova = iova;

        pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (!pa->pa_nr)
                return -EINVAL;

        pa->pa_iova_pfn = kcalloc(pa->pa_nr,
                                  sizeof(*pa->pa_iova_pfn) +
                                  sizeof(*pa->pa_pfn),
                                  GFP_KERNEL);
        if (unlikely(!pa->pa_iova_pfn))
                return -ENOMEM;
        pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;

        ret = pfn_array_pin(pa, mdev);

        if (ret > 0)
                return ret;
        else if (!ret)
                ret = -EINVAL;

        kfree(pa->pa_iova_pfn);

        return ret;
}

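/*
 * Page-count example (descriptive, assuming 4K pages): for iova == 0x10ff8
 * and len == 16, the range crosses a page boundary, so
 * pa_nr = ((0xff8 + 16) + 0xfff) >> 12 = 2 pages get pinned, even though
 * only 16 bytes are addressed. A single allocation holds both the
 * pa_iova_pfn and pa_pfn arrays, with pa_pfn starting at offset pa_nr.
 */
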
static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
        pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
        if (unlikely(ZERO_OR_NULL_PTR(pat->pat_pa))) {
                pat->pat_nr = 0;
                return -ENOMEM;
        }

        pat->pat_nr = nr;

        return 0;
}

static void pfn_array_table_unpin_free(struct pfn_array_table *pat,
                                       struct device *mdev)
{
        int i;

        for (i = 0; i < pat->pat_nr; i++)
                pfn_array_unpin_free(pat->pat_pa + i, mdev);

        if (pat->pat_nr) {
                kfree(pat->pat_pa);
                pat->pat_pa = NULL;
                pat->pat_nr = 0;
        }
}

static bool pfn_array_table_iova_pinned(struct pfn_array_table *pat,
                                        unsigned long iova)
{
        struct pfn_array *pa = pat->pat_pa;
        unsigned long iova_pfn = iova >> PAGE_SHIFT;
        int i, j;

        for (i = 0; i < pat->pat_nr; i++, pa++)
                for (j = 0; j < pa->pa_nr; j++)
                        if (pa->pa_iova_pfn[j] == iova_pfn)
                                return true;

        return false;
}

/* Create the list of idal words for a pfn_array_table. */
static inline void pfn_array_table_idal_create_words(
        struct pfn_array_table *pat,
        unsigned long *idaws)
{
        struct pfn_array *pa;
        int i, j, k;

        /*
         * Idal words (except the first one) rely on the memory being 4k
         * aligned. If a user virtual address is 4K aligned, then its
         * corresponding kernel physical address will also be 4K aligned.
         * Thus there will be no problem here to simply use the phys to
         * create an idaw.
         */
        k = 0;
        for (i = 0; i < pat->pat_nr; i++) {
                pa = pat->pat_pa + i;
                for (j = 0; j < pa->pa_nr; j++) {
                        idaws[k] = pa->pa_pfn[j] << PAGE_SHIFT;
                        if (k == 0)
                                idaws[k] += pa->pa_iova & (PAGE_SIZE - 1);
                        k++;
                }
        }
}

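/*
 * IDAW layout note (descriptive): each entry of idaws[] ends up holding
 * the host physical address of one pinned page. Only the very first word
 * may point into the middle of a page (pa_iova's offset is added to it);
 * every following word is page aligned, which is what the 4K IDAW format
 * expects.
 */
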
/*
 * Within the domain (@mdev), copy @n bytes from a guest physical
 * address (@iova) to a host physical address (@to).
 */
static long copy_from_iova(struct device *mdev,
                           void *to, u64 iova,
                           unsigned long n)
{
        struct pfn_array pa = {0};
        u64 from;
        int i, ret;
        unsigned long l, m;

        ret = pfn_array_alloc_pin(&pa, mdev, iova, n);
        if (ret <= 0)
                return ret;

        l = n;
        for (i = 0; i < pa.pa_nr; i++) {
                from = pa.pa_pfn[i] << PAGE_SHIFT;
                m = PAGE_SIZE;
                if (i == 0) {
                        from += iova & (PAGE_SIZE - 1);
                        m -= iova & (PAGE_SIZE - 1);
                }

                m = min(l, m);
                memcpy(to + (n - l), (void *)from, m);

                l -= m;
                if (l == 0)
                        break;
        }

        pfn_array_unpin_free(&pa, mdev);

        return l;
}

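/*
 * Return-value note (descriptive): like copy_from_user(), the function
 * returns the number of bytes that could not be copied, so 0 means the
 * whole range was copied. A negative value is the -errno propagated from
 * the pin step. Callers below only treat 0 as success.
 */
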
static long copy_ccw_from_iova(struct channel_program *cp,
                               struct ccw1 *to, u64 iova,
                               unsigned long len)
{
        struct ccw0 ccw0;
        struct ccw1 *pccw1;
        int ret;
        int i;

        ret = copy_from_iova(cp->mdev, to, iova, len * sizeof(struct ccw1));
        if (ret)
                return ret;

        if (!cp->orb.cmd.fmt) {
                pccw1 = to;
                for (i = 0; i < len; i++) {
                        ccw0 = *(struct ccw0 *)pccw1;
                        if ((pccw1->cmd_code & 0x0f) == CCW_CMD_TIC) {
                                pccw1->cmd_code = CCW_CMD_TIC;
                                pccw1->flags = 0;
                                pccw1->count = 0;
                        } else {
                                pccw1->cmd_code = ccw0.cmd_code;
                                pccw1->flags = ccw0.flags;
                                pccw1->count = ccw0.count;
                        }
                        pccw1->cda = ccw0.cda;
                        pccw1++;
                }
        }

        return ret;
}

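/*
 * Format conversion note (descriptive): when the guest orb requests
 * format-0 ccws (orb.cmd.fmt == 0), the copied ccws are rewritten in
 * place as format-1 ccws by reinterpreting each entry through struct
 * ccw0. A format-0 TIC carries no meaningful flags/count, so those
 * fields are zeroed; for all other commands the fields are carried
 * over unchanged.
 */
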
/*
 * Helpers to operate ccwchain.
 */
#define ccw_is_test(_ccw) (((_ccw)->cmd_code & 0x0F) == 0)

#define ccw_is_noop(_ccw) ((_ccw)->cmd_code == CCW_CMD_NOOP)

#define ccw_is_tic(_ccw) ((_ccw)->cmd_code == CCW_CMD_TIC)

#define ccw_is_idal(_ccw) ((_ccw)->flags & CCW_FLAG_IDA)

#define ccw_is_chain(_ccw) ((_ccw)->flags & (CCW_FLAG_CC | CCW_FLAG_DC))

static struct ccwchain *ccwchain_alloc(struct channel_program *cp, int len)
{
        struct ccwchain *chain;
        void *data;
        size_t size;

        /* Make ccw address aligned to 8. */
        size = ((sizeof(*chain) + 7L) & -8L) +
                sizeof(*chain->ch_ccw) * len +
                sizeof(*chain->ch_pat) * len;
        chain = kzalloc(size, GFP_DMA | GFP_KERNEL);
        if (!chain)
                return NULL;

        data = (u8 *)chain + ((sizeof(*chain) + 7L) & -8L);
        chain->ch_ccw = (struct ccw1 *)data;

        data = (u8 *)(chain->ch_ccw) + sizeof(*chain->ch_ccw) * len;
        chain->ch_pat = (struct pfn_array_table *)data;

        chain->ch_len = len;

        list_add_tail(&chain->next, &cp->ccwchain_list);

        return chain;
}

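/*
 * Allocation layout sketch (descriptive): a single GFP_DMA allocation
 * packs everything a chain needs,
 *
 *   | struct ccwchain | pad to 8 | len * struct ccw1 | len * struct pfn_array_table |
 *
 * so ch_ccw starts on an 8-byte boundary (required for ccws) and one
 * kfree() in ccwchain_free() releases it all.
 */
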
static void ccwchain_free(struct ccwchain *chain)
{
        list_del(&chain->next);
        kfree(chain);
}

/* Free resource for a ccw that allocated memory for its cda. */
static void ccwchain_cda_free(struct ccwchain *chain, int idx)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (!ccw->count)
                return;

        kfree((void *)(u64)ccw->cda);
}

/* Unpin the pages then free the memory resources. */
static void cp_unpin_free(struct channel_program *cp)
{
        struct ccwchain *chain, *temp;
        int i;

        list_for_each_entry_safe(chain, temp, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++) {
                        pfn_array_table_unpin_free(chain->ch_pat + i,
                                                   cp->mdev);
                        ccwchain_cda_free(chain, i);
                }
                ccwchain_free(chain);
        }
}

/**
 * ccwchain_calc_length - calculate the length of the ccw chain.
 * @iova: guest physical address of the target ccw chain
 * @cp: channel_program on which to perform the operation
 *
 * This is the chain length not considering any TICs.
 * You need to do a new round for each TIC target.
 *
 * Returns: the length of the ccw chain or -errno.
 */
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
{
        struct ccw1 *ccw, *p;
        int cnt;

        /*
         * Copy current chain from guest to host kernel.
         * Currently the chain length is limited to CCWCHAIN_LEN_MAX (256).
         * So copying 2K is enough (safe).
         */
        p = ccw = kcalloc(CCWCHAIN_LEN_MAX, sizeof(*ccw), GFP_KERNEL);
        if (!ccw)
                return -ENOMEM;

        cnt = copy_ccw_from_iova(cp, ccw, iova, CCWCHAIN_LEN_MAX);
        if (cnt) {
                kfree(ccw);
                return cnt;
        }

        cnt = 0;
        do {
                cnt++;

                if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
                        break;

                ccw++;
        } while (cnt < CCWCHAIN_LEN_MAX + 1);

        if (cnt == CCWCHAIN_LEN_MAX + 1)
                cnt = -EINVAL;

        kfree(p);
        return cnt;
}

static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
{
        struct ccwchain *chain;
        u32 ccw_head, ccw_tail;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = chain->ch_iova;
                ccw_tail = ccw_head + (chain->ch_len - 1) * sizeof(struct ccw1);

                if ((ccw_head <= tic->cda) && (tic->cda <= ccw_tail))
                        return 1;
        }

        return 0;
}

static int ccwchain_loop_tic(struct ccwchain *chain,
                             struct channel_program *cp);

static int ccwchain_handle_tic(struct ccw1 *tic, struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, ret;

        /* May transfer to an existing chain. */
        if (tic_target_chain_exists(tic, cp))
                return 0;

        /* Get chain length. */
        len = ccwchain_calc_length(tic->cda, cp);
        if (len < 0)
                return len;

        /* Need to allocate a new chain for this one. */
        chain = ccwchain_alloc(cp, len);
        if (!chain)
                return -ENOMEM;
        chain->ch_iova = tic->cda;

        /* Copy the new chain from guest. */
        ret = copy_ccw_from_iova(cp, chain->ch_ccw, tic->cda, len);
        if (ret) {
                ccwchain_free(chain);
                return ret;
        }

        /* Loop for tics on this new chain. */
        return ccwchain_loop_tic(chain, cp);
}

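/*
 * Recursion note (descriptive): ccwchain_handle_tic() and
 * ccwchain_loop_tic() call each other so that every TIC target gets its
 * own ccwchain, walked recursively. tic_target_chain_exists() stops the
 * recursion for targets that already have a chain, which also keeps a
 * looping channel program from being copied endlessly.
 */
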
/* Loop for TICs. */
static int ccwchain_loop_tic(struct ccwchain *chain, struct channel_program *cp)
{
        struct ccw1 *tic;
        int i, ret;

        for (i = 0; i < chain->ch_len; i++) {
                tic = chain->ch_ccw + i;

                if (!ccw_is_tic(tic))
                        continue;

                ret = ccwchain_handle_tic(tic, cp);
                if (ret)
                        return ret;
        }

        return 0;
}

static int ccwchain_fetch_tic(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;
        struct ccwchain *iter;
        u32 ccw_head, ccw_tail;

        list_for_each_entry(iter, &cp->ccwchain_list, next) {
                ccw_head = iter->ch_iova;
                ccw_tail = ccw_head + (iter->ch_len - 1) * sizeof(struct ccw1);

                if ((ccw_head <= ccw->cda) && (ccw->cda <= ccw_tail)) {
                        ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
                                                     (ccw->cda - ccw_head));
                        return 0;
                }
        }

        return -EFAULT;
}

static int ccwchain_fetch_direct(struct ccwchain *chain,
                                 int idx,
                                 struct channel_program *cp)
{
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
        int idaw_nr;

        ccw = chain->ch_ccw + idx;

        if (!ccw->count) {
                /*
                 * We just want the translation result of any direct ccw
                 * to be an IDA ccw, so let's add the IDA flag for it,
                 * even though the flag will be ignored by firmware.
                 */
                ccw->flags |= CCW_FLAG_IDA;
                return 0;
        }

        /*
         * Pin data page(s) in memory.
         * The number of pages is actually the number of idaws that will be
         * needed when translating a direct ccw to an idal ccw.
         */
        pat = chain->ch_pat + idx;
        if (pfn_array_table_init(pat, 1))
                return -ENOMEM;
        idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
                                      ccw->cda, ccw->count);
        if (idaw_nr < 0)
                return idaw_nr;

        /* Translate this direct ccw to an idal ccw. */
        idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
        if (!idaws) {
                pfn_array_table_unpin_free(pat, cp->mdev);
                return -ENOMEM;
        }
        ccw->cda = (__u32) virt_to_phys(idaws);
        ccw->flags |= CCW_FLAG_IDA;

        pfn_array_table_idal_create_words(pat, idaws);

        return 0;
}

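/*
 * Translation sketch (descriptive): a direct ccw has cda pointing at the
 * guest data buffer itself. After translation, cda points at a
 * kernel-allocated idal (GFP_DMA, so it sits below 2G as ccw1.cda
 * requires) whose words hold the host physical addresses of the pinned
 * guest pages, and CCW_FLAG_IDA is set so the channel subsystem treats
 * cda as an idal pointer.
 */
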
static int ccwchain_fetch_idal(struct ccwchain *chain,
                               int idx,
                               struct channel_program *cp)
{
        struct ccw1 *ccw;
        struct pfn_array_table *pat;
        unsigned long *idaws;
        u64 idaw_iova;
        unsigned int idaw_nr, idaw_len;
        int i, ret;

        ccw = chain->ch_ccw + idx;

        if (!ccw->count)
                return 0;

        /* Calculate size of idaws. */
        ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova));
        if (ret)
                return ret;
        idaw_nr = idal_nr_words((void *)(idaw_iova), ccw->count);
        idaw_len = idaw_nr * sizeof(*idaws);

        /* Pin data page(s) in memory. */
        pat = chain->ch_pat + idx;
        ret = pfn_array_table_init(pat, idaw_nr);
        if (ret)
                return ret;

        /* Translate idal ccw to use newly allocated idaws. */
        idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
        if (!idaws) {
                ret = -ENOMEM;
                goto out_unpin;
        }

        ret = copy_from_iova(cp->mdev, idaws, ccw->cda, idaw_len);
        if (ret)
                goto out_free_idaws;

        ccw->cda = virt_to_phys(idaws);

        for (i = 0; i < idaw_nr; i++) {
                idaw_iova = *(idaws + i);

                ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev,
                                          idaw_iova, 1);
                if (ret < 0)
                        goto out_free_idaws;
        }

        pfn_array_table_idal_create_words(pat, idaws);

        return 0;

out_free_idaws:
        kfree(idaws);
out_unpin:
        pfn_array_table_unpin_free(pat, cp->mdev);
        return ret;
}

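/*
 * IDAL translation sketch (descriptive): for a ccw that already uses an
 * idal, the guest idal (addressed by cda) is first copied into a kernel
 * buffer; the first guest idaw is fetched separately only to size the
 * list via idal_nr_words(). Each copied idaw still holds a guest
 * physical address, so the page it names is pinned (length 1) and the
 * idaw is then overwritten with the host physical address by
 * pfn_array_table_idal_create_words(). Finally cda is redirected to the
 * kernel copy.
 */
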
/*
 * Fetch one ccw.
 * To reduce memory copying, we'll pin the cda page in memory,
 * and to get rid of the cda 2G limitation of ccw1, we'll translate
 * direct ccws to idal ccws.
 */
static int ccwchain_fetch_one(struct ccwchain *chain,
                              int idx,
                              struct channel_program *cp)
{
        struct ccw1 *ccw = chain->ch_ccw + idx;

        if (ccw_is_test(ccw) || ccw_is_noop(ccw))
                return 0;

        if (ccw_is_tic(ccw))
                return ccwchain_fetch_tic(chain, idx, cp);

        if (ccw_is_idal(ccw))
                return ccwchain_fetch_idal(chain, idx, cp);

        return ccwchain_fetch_direct(chain, idx, cp);
}

/**
 * cp_init() - allocate ccwchains for a channel program.
 * @cp: channel_program on which to perform the operation
 * @mdev: the mediated device to perform pin/unpin operations
 * @orb: control block for the channel program from the guest
 *
 * This creates one or more ccwchain(s), and copies the raw data of
 * the target channel program from @orb->cmd.cpa to the new ccwchain(s).
 *
 * Limitations:
 * 1. Supports only prefetch enabled mode.
 * 2. Supports idal(c64) ccw chaining.
 * 3. Supports 4k idaw.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
{
        u64 iova = orb->cmd.cpa;
        struct ccwchain *chain;
        int len, ret;

        /*
         * XXX:
         * Only support prefetch enabled mode now.
         * Only support 64bit addressing idal.
         * Only support 4k IDAW.
         */
        if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
                return -EOPNOTSUPP;

        INIT_LIST_HEAD(&cp->ccwchain_list);
        memcpy(&cp->orb, orb, sizeof(*orb));
        cp->mdev = mdev;

        /* Get chain length. */
        len = ccwchain_calc_length(iova, cp);
        if (len < 0)
                return len;

        /* Alloc mem for the head chain. */
        chain = ccwchain_alloc(cp, len);
        if (!chain)
                return -ENOMEM;
        chain->ch_iova = iova;

        /* Copy the head chain from guest. */
        ret = copy_ccw_from_iova(cp, chain->ch_ccw, iova, len);
        if (ret) {
                ccwchain_free(chain);
                return ret;
        }

        /* Now loop for its TICs. */
        ret = ccwchain_loop_tic(chain, cp);
        if (ret)
                cp_unpin_free(cp);

        return ret;
}

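/*
 * Typical call sequence, as suggested by the kernel-doc in this file
 * (a rough sketch; identifiers of the caller are illustrative only):
 *
 *      ret = cp_init(&cp, mdev, orb);          // copy + chain guest ccws
 *      if (!ret)
 *              ret = cp_prefetch(&cp);         // pin pages, rewrite cdas
 *      if (!ret)
 *              real_orb = cp_get_orb(&cp, intparm, lpm);
 *      // ... issue ssch() with real_orb, wait for the interrupt ...
 *      cp_update_scsw(&cp, &irb->scsw);        // map scsw.cpa back to guest
 *      cp_free(&cp);
 */
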
/**
 * cp_free() - free resources for channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This unpins the memory pages and frees the memory space occupied by
 * @cp, which must have been initialized by a previous call to cp_init().
 * Otherwise, undefined behavior occurs.
 */
void cp_free(struct channel_program *cp)
{
        cp_unpin_free(cp);
}

/**
 * cp_prefetch() - translate a guest physical address channel program to
 *                 a real-device runnable channel program.
 * @cp: channel_program on which to perform the operation
 *
 * This function translates the guest-physical-address channel program
 * and stores the result to ccwchain list. @cp must have been
 * initialized by a previous call to cp_init(). Otherwise, undefined
 * behavior occurs.
 *
 * The S/390 CCW Translation APIs (prefixed by 'cp_') are introduced
 * as helpers to do ccw chain translation inside the kernel. Basically
 * they accept a channel program issued by a virtual machine, and
 * translate the channel program to a real-device runnable channel
 * program.
 *
 * These APIs will copy the ccws into kernel-space buffers, and update
 * the guest physical addresses with their corresponding host physical
 * addresses. Then channel I/O device drivers could issue the
 * translated channel program to real devices to perform an I/O
 * operation.
 *
 * These interfaces are designed to support translation only for
 * channel programs, which are generated and formatted by a
 * guest. Thus this will make it possible for things like VFIO to
 * leverage the interfaces to passthrough a channel I/O mediated
 * device in QEMU.
 *
 * We support direct ccw chaining by translating them to idal ccws.
 *
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int cp_prefetch(struct channel_program *cp)
{
        struct ccwchain *chain;
        int len, idx, ret;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                len = chain->ch_len;
                for (idx = 0; idx < len; idx++) {
                        ret = ccwchain_fetch_one(chain, idx, cp);
                        if (ret)
                                return ret;
                }
        }

        return 0;
}

/**
 * cp_get_orb() - get the orb of the channel program
 * @cp: channel_program on which to perform the operation
 * @intparm: new intparm for the returned orb
 * @lpm: candidate value of the logical-path mask for the returned orb
 *
 * This function returns the address of the updated orb of the channel
 * program. Channel I/O device drivers could use this orb to issue a
 * ssch.
 */
union orb *cp_get_orb(struct channel_program *cp, u32 intparm, u8 lpm)
{
        union orb *orb;
        struct ccwchain *chain;
        struct ccw1 *cpa;

        orb = &cp->orb;

        orb->cmd.intparm = intparm;
        orb->cmd.fmt = 1;
        orb->cmd.key = PAGE_DEFAULT_KEY >> 4;

        if (orb->cmd.lpm == 0)
                orb->cmd.lpm = lpm;

        chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
        cpa = chain->ch_ccw;
        orb->cmd.cpa = (__u32) __pa(cpa);

        return orb;
}

/**
 * cp_update_scsw() - update scsw for a channel program.
 * @cp: channel_program on which to perform the operation
 * @scsw: I/O results of the channel program and also the target to be
 *        updated
 *
 * @scsw contains the I/O results of the channel program pointed to by
 * @cp. However, what @scsw->cpa stores is a host physical address,
 * which is meaningless for the guest that is waiting for the I/O
 * results.
 *
 * This function updates @scsw->cpa to its corresponding guest physical
 * address.
 */
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
        struct ccwchain *chain;
        u32 cpa = scsw->cmd.cpa;
        u32 ccw_head, ccw_tail;

        /*
         * LATER:
         * For now, only update the cmd.cpa part. We may need to deal with
         * other portions of the schib as well, even if we don't return them
         * in the ioctl directly. Path status changes etc.
         */
        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                ccw_head = (u32)(u64)chain->ch_ccw;
                ccw_tail = (u32)(u64)(chain->ch_ccw + chain->ch_len - 1);

                if ((ccw_head <= cpa) && (cpa <= ccw_tail)) {
                        /*
                         * (cpa - ccw_head) is the offset value of the host
                         * physical ccw to its chain head.
                         * Adding this value to the guest physical ccw chain
                         * head gets us the guest cpa.
                         */
                        cpa = chain->ch_iova + (cpa - ccw_head);
                        break;
                }
        }

        scsw->cmd.cpa = cpa;
}

/**
 * cp_iova_pinned() - check if an iova is pinned for a ccw chain.
 * @cp: channel_program on which to perform the operation
 * @iova: the iova to check
 *
 * If the @iova is currently pinned for the ccw chain, return true;
 * else return false.
 */
bool cp_iova_pinned(struct channel_program *cp, u64 iova)
{
        struct ccwchain *chain;
        int i;

        list_for_each_entry(chain, &cp->ccwchain_list, next) {
                for (i = 0; i < chain->ch_len; i++)
                        if (pfn_array_table_iova_pinned(chain->ch_pat + i,
                                                        iova))
                                return true;
        }

        return false;
}