/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>

#include "rds.h"
#include "rdma.h"

static DECLARE_WAIT_QUEUE_HEAD(rds_message_flush_waitq);

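/*
 * Fixed payload size for each extension header type.  On the wire the
 * length is implied by the one-byte type, so both ends must agree on
 * these sizes.
 */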
static unsigned int	rds_exthdr_size[__RDS_EXTHDR_MAX] = {
[RDS_EXTHDR_NONE]	= 0,
[RDS_EXTHDR_VERSION]	= sizeof(struct rds_ext_header_version),
[RDS_EXTHDR_RDMA]	= sizeof(struct rds_ext_header_rdma),
[RDS_EXTHDR_RDMA_DEST]	= sizeof(struct rds_ext_header_rdma_dest),
};

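/*
 * Take an additional reference on a message.  Callers pair this with
 * rds_message_put().
 */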
void rds_message_addref(struct rds_message *rm)
{
	rdsdebug("addref rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));
	atomic_inc(&rm->m_refcount);
}
EXPORT_SYMBOL_GPL(rds_message_addref);

/*
 * This relies on dma_map_sg() not touching sg[].page during merging.
 */
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->data.m_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->data.m_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->data.m_sg[i]));
	}
	rm->data.m_nents = 0;

	if (rm->rdma.m_rdma_op)
		rds_rdma_free_op(rm->rdma.m_rdma_op);
	if (rm->rdma.m_rdma_mr)
		rds_mr_put(rm->rdma.m_rdma_mr);
}

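/*
 * An rds_incoming is embedded in its rds_message, so purging the inc
 * means purging the containing message.
 */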
void rds_message_inc_purge(struct rds_incoming *inc)
{
	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);

	rds_message_purge(rm);
}

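/*
 * Drop a reference.  The final put frees the message, which by then
 * must be off both the socket and connection queues.
 */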
void rds_message_put(struct rds_message *rm)
{
	rdsdebug("put rm %p ref %d\n", rm, atomic_read(&rm->m_refcount));

	if (atomic_dec_and_test(&rm->m_refcount)) {
		BUG_ON(!list_empty(&rm->m_sock_item));
		BUG_ON(!list_empty(&rm->m_conn_item));
		rds_message_purge(rm);

		kfree(rm);
	}
}
EXPORT_SYMBOL_GPL(rds_message_put);

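/* Freeing an inc drops the reference held on the containing message. */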
void rds_message_inc_free(struct rds_incoming *inc)
{
	struct rds_message *rm = container_of(inc, struct rds_message, m_inc);

	rds_message_put(rm);
}

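/*
 * Fill in the constant parts of an outgoing header.  h_len is set
 * separately by callers that know the payload size, and the extension
 * area starts out empty.
 */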
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq)
{
	hdr->h_flags = 0;
	hdr->h_sport = sport;
	hdr->h_dport = dport;
	hdr->h_sequence = cpu_to_be64(seq);
	hdr->h_exthdr[0] = RDS_EXTHDR_NONE;
}
EXPORT_SYMBOL_GPL(rds_message_populate_header);

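/*
 * Append an extension header: a one-byte type, the fixed-size payload
 * for that type, then a terminating RDS_EXTHDR_NONE.  Returns 1 on
 * success, 0 if the extension is unknown, mis-sized or doesn't fit.
 */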
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data, unsigned int len)
{
	unsigned int ext_len = sizeof(u8) + len;
	unsigned char *dst;

	/* For now, refuse to add more than one extension header */
	if (hdr->h_exthdr[0] != RDS_EXTHDR_NONE)
		return 0;

	if (type >= __RDS_EXTHDR_MAX || len != rds_exthdr_size[type])
		return 0;

	if (ext_len >= RDS_HEADER_EXT_SPACE)
		return 0;
	dst = hdr->h_exthdr;

	*dst++ = type;
	memcpy(dst, data, len);

	dst[len] = RDS_EXTHDR_NONE;
	return 1;
}
EXPORT_SYMBOL_GPL(rds_message_add_extension);

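/*
 * A minimal usage sketch for the above; rds_message_add_version_extension()
 * below is the real equivalent:
 *
 *	struct rds_ext_header_version ext_hdr;
 *
 *	ext_hdr.h_version = cpu_to_be32(version);
 *	rds_message_add_extension(hdr, RDS_EXTHDR_VERSION,
 *				  &ext_hdr, sizeof(ext_hdr));
 */
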
/*
 * If a message has extension headers, retrieve them here.
 * Call like this:
 *
 * unsigned int pos = 0;
 *
 * while (1) {
 *	buflen = sizeof(buffer);
 *	type = rds_message_next_extension(hdr, &pos, buffer, &buflen);
 *	if (type == RDS_EXTHDR_NONE)
 *		break;
 *	...
 * }
 */
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf, unsigned int *buflen)
{
	unsigned int offset, ext_type, ext_len;
	u8 *src = hdr->h_exthdr;

	offset = *pos;
	if (offset >= RDS_HEADER_EXT_SPACE)
		goto none;

	/* Get the extension type and length. For now, the
	 * length is implied by the extension type. */
	ext_type = src[offset++];

	if (ext_type == RDS_EXTHDR_NONE || ext_type >= __RDS_EXTHDR_MAX)
		goto none;
	ext_len = rds_exthdr_size[ext_type];
	if (offset + ext_len > RDS_HEADER_EXT_SPACE)
		goto none;

	*pos = offset + ext_len;
	if (ext_len < *buflen)
		*buflen = ext_len;
	memcpy(buf, src + offset, *buflen);
	return ext_type;

none:
	*pos = RDS_HEADER_EXT_SPACE;
	*buflen = 0;
	return RDS_EXTHDR_NONE;
}

int rds_message_add_version_extension(struct rds_header *hdr, unsigned int version)
{
	struct rds_ext_header_version ext_hdr;

	ext_hdr.h_version = cpu_to_be32(version);
	return rds_message_add_extension(hdr, RDS_EXTHDR_VERSION, &ext_hdr, sizeof(ext_hdr));
}

int rds_message_get_version_extension(struct rds_header *hdr, unsigned int *version)
{
	struct rds_ext_header_version ext_hdr;
	unsigned int pos = 0, len = sizeof(ext_hdr);

	/* We assume the version extension is the only one present */
	if (rds_message_next_extension(hdr, &pos, &ext_hdr, &len) != RDS_EXTHDR_VERSION)
		return 0;
	*version = be32_to_cpu(ext_hdr.h_version);
	return 1;
}

int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key, u32 offset)
{
	struct rds_ext_header_rdma_dest ext_hdr;

	ext_hdr.h_rdma_rkey = cpu_to_be32(r_key);
	ext_hdr.h_rdma_offset = cpu_to_be32(offset);
	return rds_message_add_extension(hdr, RDS_EXTHDR_RDMA_DEST, &ext_hdr, sizeof(ext_hdr));
}
EXPORT_SYMBOL_GPL(rds_message_add_rdma_dest_extension);

/*
 * Each rds_message is allocated with extra space for the scatterlist
 * entries its RDS ops will need, so that a message and its SG pool
 * come from a single allocation.  Each op then grabs SGs from the pool
 * when initializing its part of the rds_message.
 */
struct rds_message *rds_message_alloc(unsigned int extra_len, gfp_t gfp)
{
	struct rds_message *rm;

	rm = kzalloc(sizeof(struct rds_message) + extra_len, gfp);
	if (!rm)
		goto out;

	rm->m_used_sgs = 0;
	rm->m_total_sgs = extra_len / sizeof(struct scatterlist);

	atomic_set(&rm->m_refcount, 1);
	INIT_LIST_HEAD(&rm->m_sock_item);
	INIT_LIST_HEAD(&rm->m_conn_item);
	spin_lock_init(&rm->m_rs_lock);

out:
	return rm;
}

/*
 * RDS ops use this to grab SG entries from the rm's sg pool.
 */
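/*
 * A sketch of the intended pattern, assuming an op that needs two SG
 * entries (extra_len must be sized in bytes, since m_total_sgs above
 * is derived by dividing it by sizeof(struct scatterlist)):
 *
 *	rm = rds_message_alloc(2 * sizeof(struct scatterlist), GFP_KERNEL);
 *	if (!rm)
 *		return -ENOMEM;
 *	sg = rds_message_alloc_sgs(rm, 2);
 *
 * followed by sg_set_page() on each returned entry.
 */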
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents)
{
	struct scatterlist *sg_first = (struct scatterlist *) &rm[1];
	struct scatterlist *sg_ret;

	WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs);

	sg_ret = &sg_first[rm->m_used_sgs];
	rm->m_used_sgs += nents;

	return sg_ret;
}

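/*
 * Build an outgoing message around kernel pages the caller already
 * owns; RDS_MSG_PAGEVEC keeps rds_message_purge() from freeing them.
 */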
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = ceil(total_len, PAGE_SIZE);

	/* rds_message_alloc() takes the extra length in bytes, not sg entries */
	rm = rds_message_alloc(num_sgs * sizeof(struct scatterlist), GFP_KERNEL);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.m_nents = num_sgs;

	for (i = 0; i < rm->data.m_nents; ++i) {
		sg_set_page(&rm->data.m_sg[i],
			    virt_to_page(page_addrs[i]),
			    PAGE_SIZE, 0);
	}

	return rm;
}

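/*
 * Copy an outgoing payload from the user's iovec into the message,
 * walking the iovec and the scatterlist in lockstep and allocating a
 * page the first time each scatterlist entry is touched.
 */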
int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
			       size_t total_len)
{
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long sg_off;
	struct iovec *iov;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.m_sg;
	iov = first_iov;
	iov_off = 0;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	while (total_len) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, total_len,
						       GFP_HIGHUSER);
			if (ret)
				goto out;
			rm->data.m_nents++;
			sg_off = 0;
		}

		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - sg_off);
		to_copy = min_t(size_t, to_copy, total_len);

		rdsdebug("copying %lu bytes from user iov [%p, %zu] + %lu to "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 (void *)sg_page(sg), sg->offset, sg->length, sg_off);

		ret = rds_page_copy_from_user(sg_page(sg), sg->offset + sg_off,
					      iov->iov_base + iov_off,
					      to_copy);
		if (ret)
			goto out;

		iov_off += to_copy;
		total_len -= to_copy;
		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

out:
	return ret;
}

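/*
 * The receive-side counterpart: copy at most @size bytes of a received
 * message into the user's iovec.  Returns the number of bytes copied,
 * or a negative errno if a page copy faults.
 */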
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
				 struct iovec *first_iov, size_t size)
{
	struct rds_message *rm;
	struct iovec *iov;
	struct scatterlist *sg;
	unsigned long to_copy;
	unsigned long iov_off;
	unsigned long vec_off;
	int copied;
	int ret;
	u32 len;

	rm = container_of(inc, struct rds_message, m_inc);
	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	iov = first_iov;
	iov_off = 0;
	sg = rm->data.m_sg;
	vec_off = 0;
	copied = 0;

	while (copied < size && copied < len) {
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, sg->length - vec_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("copying %lu bytes to user iov [%p, %zu] + %lu from "
			 "sg [%p, %u, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(sg), sg->offset, sg->length, vec_off);

		ret = rds_page_copy_to_user(sg_page(sg), sg->offset + vec_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			copied = ret;
			break;
		}

		iov_off += to_copy;
		vec_off += to_copy;
		copied += to_copy;

		if (vec_off == sg->length) {
			vec_off = 0;
			sg++;
		}
	}

	return copied;
}

/*
 * If the message is still on the send queue, wait until the transport
 * is done with it. This is particularly important for RDMA operations.
 */
void rds_message_wait(struct rds_message *rm)
{
	wait_event(rds_message_flush_waitq,
		   !test_bit(RDS_MSG_MAPPED, &rm->m_flags));
}

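/*
 * Transports call this when they are done with a message's DMA
 * mapping; it wakes any sender blocked in rds_message_wait().
 */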
void rds_message_unmapped(struct rds_message *rm)
{
	clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
	if (waitqueue_active(&rds_message_flush_waitq))
		wake_up(&rds_message_flush_waitq);
}
EXPORT_SYMBOL_GPL(rds_message_unmapped);