/*
 * Copyright IBM Corporation, 2010
 * Author Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <linux/scatterlist.h>
#include "trans_common.h"

/**
 * p9_release_req_pages - Release pages after the transaction.
 * @rpinfo: PDU's private page holding the struct trans_rpage_info
 */
void
p9_release_req_pages(struct trans_rpage_info *rpinfo)
{
        int i = 0;

        while (rpinfo->rp_data[i] && rpinfo->rp_nr_pages--) {
                put_page(rpinfo->rp_data[i]);
                i++;
        }
}
EXPORT_SYMBOL(p9_release_req_pages);
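
/*
 * Usage sketch (illustrative, not part of the original file): once the
 * server's reply for a zero-copy request has been consumed, the transport
 * would typically drop the page references taken by p9_payload_gup(), e.g.
 *
 *      if (req->tc->private)
 *              p9_release_req_pages(req->tc->private);
 */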

/**
 * p9_nr_pages - Return number of pages needed to accommodate the payload.
 * @req: Request to be sent to server.
 */
int
p9_nr_pages(struct p9_req_t *req)
{
        unsigned long start_page, end_page;
        start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
        end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
                        PAGE_SIZE - 1) >> PAGE_SHIFT;
        return end_page - start_page;
}
EXPORT_SYMBOL(p9_nr_pages);
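
/*
 * Worked example (illustrative, not part of the original file): with 4 KiB
 * pages, a 5000-byte payload whose user address begins 0xff0 bytes into a
 * page gives
 *      end_page - start_page = ((addr + 5000 + 4095) >> 12) - (addr >> 12) = 3,
 * i.e. three pages are needed because the data straddles two page boundaries.
 */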

/**
 * p9_payload_gup - Translates the user buffer into kernel pages and
 * pins them, for either read or write, through get_user_pages_fast().
 * @req: Request to be sent to server.
 * @pdata_off: data offset into the first page after translation (gup).
 * @pdata_len: Total length of the IO. gup may not return requested # of pages.
 * @nr_pages: number of pages to accommodate the payload
 * @rw: Indicates if the pages are for read or write.
 */
int
p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
               int nr_pages, u8 rw)
{
        uint32_t first_page_bytes = 0;
        int pdata_mapped_pages; /* signed: get_user_pages_fast() may return -errno */
        struct trans_rpage_info *rpinfo;

        *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);

        if (*pdata_off)
                first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
                                       req->tc->pbuf_size);

        rpinfo = req->tc->private;
        pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
                        nr_pages, rw, &rpinfo->rp_data[0]);

        if (pdata_mapped_pages < 0) {
                printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p "
                                "nr_pages:%d\n", pdata_mapped_pages,
                                req->tc->pubuf, nr_pages);
                pdata_mapped_pages = 0;
                return -EIO;
        }
        rpinfo->rp_nr_pages = pdata_mapped_pages;
        if (*pdata_off) {
                *pdata_len = first_page_bytes;
                *pdata_len += min((req->tc->pbuf_size - *pdata_len),
                                ((size_t)pdata_mapped_pages - 1) << PAGE_SHIFT);
        } else {
                *pdata_len = min(req->tc->pbuf_size,
                                (size_t)pdata_mapped_pages << PAGE_SHIFT);
        }
        return 0;
}
EXPORT_SYMBOL(p9_payload_gup);
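
/*
 * Usage sketch (an illustration added here, not part of the original file):
 * how a zero-copy transport might tie the helpers above together.  The
 * function name is hypothetical; it assumes the request's tc->private
 * already points to a large enough struct trans_rpage_info, that P9_TREAD
 * replies are written into the user buffer (hence rw = 1 for gup), and it
 * elides the actual posting/completion of the request.
 */
static int __maybe_unused p9_zc_prepare_example(struct p9_req_t *req)
{
        size_t pdata_off;
        int pdata_len;
        int nr_pages = p9_nr_pages(req);
        int err;

        /* Pin the user payload; rw = 1 lets the server's reply land in it. */
        err = p9_payload_gup(req, &pdata_off, &pdata_len, nr_pages,
                             req->tc->id == P9_TREAD ? 1 : 0);
        if (err < 0)
                return err;

        /* ... hand (pdata_off, pdata_len, rp_data[]) to the transport ... */

        /* On completion, drop the page references taken by gup. */
        p9_release_req_pages(req->tc->private);
        return 0;
}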