/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_plain.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../include/obd_support.h"
#include "../include/obd_cksum.h"
#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"

struct plain_sec {
	struct ptlrpc_sec	pls_base;
	rwlock_t		pls_lock;
	struct ptlrpc_cli_ctx	*pls_ctx;
};

static inline struct plain_sec *sec2plsec(struct ptlrpc_sec *sec)
{
	return container_of(sec, struct plain_sec, pls_base);
}

static struct ptlrpc_sec_policy plain_policy;
static struct ptlrpc_ctx_ops plain_ctx_ops;
static struct ptlrpc_svc_ctx plain_svc_ctx;

static unsigned int plain_at_offset;

/*
 * For simplicity, plain policy RPCs use a fixed buffer layout.
 */
#define PLAIN_PACK_SEGMENTS	(4)

#define PLAIN_PACK_HDR_OFF	(0)
#define PLAIN_PACK_MSG_OFF	(1)
#define PLAIN_PACK_USER_OFF	(2)
#define PLAIN_PACK_BULK_OFF	(3)

#define PLAIN_FL_USER		(0x01)
#define PLAIN_FL_BULK		(0x02)

struct plain_header {
	__u8 ph_ver;		/* 0 */
	__u8 ph_flags;
	__u8 ph_sp;		/* source */
	__u8 ph_bulk_hash_alg;	/* complete flavor desc */
	__u8 ph_pad[4];
};

struct plain_bulk_token {
	__u8 pbt_hash[8];
};

#define PLAIN_BSD_SIZE \
	(sizeof(struct ptlrpc_bulk_sec_desc) + sizeof(struct plain_bulk_token))

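/*
 * Segment map of a plain request/reply buffer: segment 0 carries the
 * plain_header, segment 1 the embedded lustre_msg, segment 2 an optional
 * user descriptor (PLAIN_FL_USER) and segment 3 an optional bulk security
 * descriptor plus token (PLAIN_FL_BULK).
 */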
/****************************************
 * bulk checksum helpers                *
 ****************************************/

static int plain_unpack_bsd(struct lustre_msg *msg, int swabbed)
{
	struct ptlrpc_bulk_sec_desc *bsd;

	if (bulk_sec_desc_unpack(msg, PLAIN_PACK_BULK_OFF, swabbed))
		return -EPROTO;

	bsd = lustre_msg_buf(msg, PLAIN_PACK_BULK_OFF, PLAIN_BSD_SIZE);
	if (!bsd) {
		CERROR("bulk sec desc has short size %d\n",
		       lustre_msg_buflen(msg, PLAIN_PACK_BULK_OFF));
		return -EPROTO;
	}

	if (bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
	    bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG) {
		CERROR("invalid bulk svc %u\n", bsd->bsd_svc);
		return -EPROTO;
	}

	return 0;
}

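/*
 * Bulk data integrity: the checksum of the bulk pages is computed with the
 * flavor's hash algorithm into the 8-byte pbt_hash of the plain_bulk_token;
 * BULK_HASH_ALG_NULL disables checksumming entirely.
 */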
static int plain_generate_bulk_csum(struct ptlrpc_bulk_desc *desc,
				    __u8 hash_alg,
				    struct plain_bulk_token *token)
{
	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(token->pbt_hash, 0, sizeof(token->pbt_hash));
	return sptlrpc_get_bulk_checksum(desc, hash_alg, token->pbt_hash,
					 sizeof(token->pbt_hash));
}

static int plain_verify_bulk_csum(struct ptlrpc_bulk_desc *desc,
				  __u8 hash_alg,
				  struct plain_bulk_token *tokenr)
{
	struct plain_bulk_token tokenv;
	int rc;

	if (hash_alg == BULK_HASH_ALG_NULL)
		return 0;

	memset(&tokenv.pbt_hash, 0, sizeof(tokenv.pbt_hash));
	rc = sptlrpc_get_bulk_checksum(desc, hash_alg, tokenv.pbt_hash,
				       sizeof(tokenv.pbt_hash));
	if (rc)
		return rc;

	if (memcmp(tokenr->pbt_hash, tokenv.pbt_hash, sizeof(tokenr->pbt_hash)))
		return -EACCES;
	return 0;
}

static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
{
	char *ptr;
	unsigned int off, i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len == 0)
			continue;

		ptr = kmap(desc->bd_iov[i].kiov_page);
		off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
		ptr[off] ^= 0x1;
		kunmap(desc->bd_iov[i].kiov_page);
		return;
	}
}

/****************************************
 * cli_ctx apis                         *
 ****************************************/

static
int plain_ctx_refresh(struct ptlrpc_cli_ctx *ctx)
{
	/* should never reach here */
	LBUG();
	return 0;
}

static
int plain_ctx_validate(struct ptlrpc_cli_ctx *ctx)
{
	return 0;
}

static
int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_sp = ctx->cc_sec->ps_part;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_udesc)
		phdr->ph_flags |= PLAIN_FL_USER;
	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
						 msg->lm_buflens);
	return 0;
}

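/*
 * Verify a plain reply: check the fixed segment layout and the plain_header,
 * verify the CRC32 carried in lm_cksum for early replies, and unpack the
 * bulk security descriptor when the reply carries one.
 */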
static
int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_repdata;
	struct plain_header *phdr;
	__u32 cksum;
	int swabbed;

	if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
		return -EPROTO;
	}

	swabbed = ptlrpc_rep_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (!phdr) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	/* expect no user desc in reply */
	if (phdr->ph_flags & PLAIN_FL_USER) {
		CERROR("Unexpected udesc flag in reply\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
		CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
		       req->rq_flvr.u_bulk.hash.hash_alg);
		return -EPROTO;
	}

	if (unlikely(req->rq_early)) {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				       lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				       NULL, 0, (unsigned char *)&cksum, &hsize);
		if (cksum != msg->lm_cksum) {
			CDEBUG(D_SEC,
			       "early reply checksum mismatch: %08x != %08x\n",
			       cpu_to_le32(cksum), msg->lm_cksum);
			return -EINVAL;
		}
	} else {
		/* whether we sent with bulk or not, we expect the same
		 * in reply, except for early reply
		 */
		if (!req->rq_early &&
		    !equi(req->rq_pack_bulk == 1,
			  phdr->ph_flags & PLAIN_FL_BULK)) {
			CERROR("%s bulk checksum in reply\n",
			       req->rq_pack_bulk ? "Missing" : "Unexpected");
			return -EPROTO;
		}

		if (phdr->ph_flags & PLAIN_FL_BULK) {
			if (plain_unpack_bsd(msg, swabbed))
				return -EPROTO;
		}
	}

	req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
	return 0;
}

static
int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
			struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsd;
	struct plain_bulk_token *token;
	int rc;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	token = (struct plain_bulk_token *)bsd->bsd_data;

	bsd->bsd_version = 0;
	bsd->bsd_flags = 0;
	bsd->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);

	if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	if (req->rq_bulk_read)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      token);
	if (rc) {
		CERROR("bulk write: failed to compute checksum: %d\n", rc);
	} else {
		/*
		 * for sending we only compute the wrong checksum instead
		 * of corrupting the data so it is still correct on a redo
		 */
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND) &&
		    req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL)
			token->pbt_hash[0] ^= 0x1;
	}

	return rc;
}

static
int plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
			  struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_bulk_sec_desc *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;
	int i, nob;

	LASSERT(req->rq_pack_bulk);
	LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS);
	LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS);

	bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

	if (req->rq_bulk_write) {
		if (bsdv->bsd_flags & BSD_FL_ERR)
			return -EIO;
		return 0;
	}

	/* fix the actual data size */
	for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
		if (desc->bd_iov[i].kiov_len + nob > desc->bd_nob_transferred) {
			desc->bd_iov[i].kiov_len =
				desc->bd_nob_transferred - nob;
		}
		nob += desc->bd_iov[i].kiov_len;
	}

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenv);
	if (rc)
		CERROR("bulk read: client verify failed: %d\n", rc);

	return rc;
}

/****************************************
 * sec apis                             *
 ****************************************/

static
struct ptlrpc_cli_ctx *plain_sec_install_ctx(struct plain_sec *plsec)
{
	struct ptlrpc_cli_ctx *ctx, *ctx_new;

	ctx_new = kzalloc(sizeof(*ctx_new), GFP_NOFS);

	write_lock(&plsec->pls_lock);

	ctx = plsec->pls_ctx;
	if (ctx) {
		atomic_inc(&ctx->cc_refcount);

		kfree(ctx_new);
	} else if (ctx_new) {
		ctx = ctx_new;

		atomic_set(&ctx->cc_refcount, 1);	/* for cache */
		ctx->cc_sec = &plsec->pls_base;
		ctx->cc_ops = &plain_ctx_ops;
		ctx->cc_expire = 0;
		ctx->cc_flags = PTLRPC_CTX_CACHED | PTLRPC_CTX_UPTODATE;
		ctx->cc_vcred.vc_uid = 0;
		spin_lock_init(&ctx->cc_lock);
		INIT_LIST_HEAD(&ctx->cc_req_list);
		INIT_LIST_HEAD(&ctx->cc_gc_chain);

		plsec->pls_ctx = ctx;
		atomic_inc(&plsec->pls_base.ps_nctx);
		atomic_inc(&plsec->pls_base.ps_refcount);

		atomic_inc(&ctx->cc_refcount);	/* for caller */
	}

	write_unlock(&plsec->pls_lock);

	return ctx;
}

static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
	struct plain_sec *plsec = sec2plsec(sec);

	LASSERT(sec->ps_policy == &plain_policy);
	LASSERT(sec->ps_import);
	LASSERT(atomic_read(&sec->ps_refcount) == 0);
	LASSERT(atomic_read(&sec->ps_nctx) == 0);
	LASSERT(!plsec->pls_ctx);

	class_import_put(sec->ps_import);

	kfree(plsec);
}

static
void plain_kill_sec(struct ptlrpc_sec *sec)
{
	sec->ps_dying = 1;
}

static
struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
				    struct ptlrpc_svc_ctx *svc_ctx,
				    struct sptlrpc_flavor *sf)
{
	struct plain_sec *plsec;
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;

	LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);

	plsec = kzalloc(sizeof(*plsec), GFP_NOFS);
	if (!plsec)
		return NULL;

	/*
	 * initialize plain_sec
	 */
	rwlock_init(&plsec->pls_lock);
	plsec->pls_ctx = NULL;

	sec = &plsec->pls_base;
	sec->ps_policy = &plain_policy;
	atomic_set(&sec->ps_refcount, 0);
	atomic_set(&sec->ps_nctx, 0);
	sec->ps_id = sptlrpc_get_next_secid();
	sec->ps_import = class_import_get(imp);
	sec->ps_flvr = *sf;
	spin_lock_init(&sec->ps_lock);
	INIT_LIST_HEAD(&sec->ps_gc_list);
	sec->ps_gc_interval = 0;
	sec->ps_gc_next = 0;

	/* install ctx immediately if this is a reverse sec */
	if (svc_ctx) {
		ctx = plain_sec_install_ctx(plsec);
		if (!ctx) {
			plain_destroy_sec(sec);
			return NULL;
		}
		sptlrpc_cli_ctx_put(ctx, 1);
	}

	return sec;
}

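/*
 * The plain policy keeps a single shared client context per sec (pls_ctx).
 * Lookup ignores the credentials and returns that cached context, installing
 * one lazily if none exists yet.
 */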
static
struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
					struct vfs_cred *vcred,
					int create, int remove_dead)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	read_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	if (ctx)
		atomic_inc(&ctx->cc_refcount);
	read_unlock(&plsec->pls_lock);

	if (unlikely(!ctx))
		ctx = plain_sec_install_ctx(plsec);

	return ctx;
}

static
void plain_release_ctx(struct ptlrpc_sec *sec,
		       struct ptlrpc_cli_ctx *ctx, int sync)
{
	LASSERT(atomic_read(&sec->ps_refcount) > 0);
	LASSERT(atomic_read(&sec->ps_nctx) > 0);
	LASSERT(atomic_read(&ctx->cc_refcount) == 0);
	LASSERT(ctx->cc_sec == sec);

	kfree(ctx);

	atomic_dec(&sec->ps_nctx);
	sptlrpc_sec_put(sec);
}

static
int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
			  uid_t uid, int grace, int force)
{
	struct plain_sec *plsec = sec2plsec(sec);
	struct ptlrpc_cli_ctx *ctx;

	/* do nothing unless the caller wants to flush for 'all' */
	if (uid != -1)
		return 0;

	write_lock(&plsec->pls_lock);
	ctx = plsec->pls_ctx;
	plsec->pls_ctx = NULL;
	write_unlock(&plsec->pls_lock);

	if (ctx)
		sptlrpc_cli_ctx_put(ctx, 1);
	return 0;
}

static
int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_udesc)
		buflens[PLAIN_PACK_USER_OFF] = sptlrpc_current_user_desc_size();

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	if (!req->rq_reqbuf) {
		LASSERT(!req->rq_pool);

		alloc_len = size_roundup_power2(alloc_len);
		req->rq_reqbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
		if (!req->rq_reqbuf)
			return -ENOMEM;

		req->rq_reqbuf_len = alloc_len;
	} else {
		LASSERT(req->rq_pool);
		LASSERT(req->rq_reqbuf_len >= alloc_len);
		memset(req->rq_reqbuf, 0, alloc_len);
	}

	lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0);

	if (req->rq_pack_udesc) {
		int rc = sptlrpc_pack_user_desc(req->rq_reqbuf,
						PLAIN_PACK_USER_OFF);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	if (!req->rq_pool) {
		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = NULL;
		req->rq_reqbuf_len = 0;
	}
}

static
int plain_alloc_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req,
		       int msgsize)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int alloc_len;

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk) {
		LASSERT(req->rq_bulk_read || req->rq_bulk_write);
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;
	}

	alloc_len = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	/* add space for early reply */
	alloc_len += plain_at_offset;

	alloc_len = size_roundup_power2(alloc_len);

	req->rq_repbuf = libcfs_kvzalloc(alloc_len, GFP_NOFS);
	if (!req->rq_repbuf)
		return -ENOMEM;

	req->rq_repbuf_len = alloc_len;
	return 0;
}

static
void plain_free_repbuf(struct ptlrpc_sec *sec,
		       struct ptlrpc_request *req)
{
	kvfree(req->rq_repbuf);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
}

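/*
 * Grow one segment of an already-packed request in place.  The new embedded
 * and wrapper message sizes are computed first; a larger buffer is allocated
 * and the old contents copied over only if the current wrapper buffer is too
 * small.
 */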
static
int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
			 struct ptlrpc_request *req,
			 int segment, int newsize)
{
	struct lustre_msg *newbuf;
	int oldsize;
	int newmsg_size, newbuf_size;

	LASSERT(req->rq_reqbuf);
	LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
	LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) ==
		req->rq_reqmsg);

	/* compute new embedded msg size. */
	oldsize = req->rq_reqmsg->lm_buflens[segment];
	req->rq_reqmsg->lm_buflens[segment] = newsize;
	newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount,
					 req->rq_reqmsg->lm_buflens);
	req->rq_reqmsg->lm_buflens[segment] = oldsize;

	/* compute new wrapper msg size. */
	oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF];
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size;
	newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount,
					 req->rq_reqbuf->lm_buflens);
	req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize;

	/* request from pool should always have enough buffer */
	LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size);

	if (req->rq_reqbuf_len < newbuf_size) {
		newbuf_size = size_roundup_power2(newbuf_size);

		newbuf = libcfs_kvzalloc(newbuf_size, GFP_NOFS);
		if (!newbuf)
			return -ENOMEM;

		/* Must hold the import lock here: the otherwise unprotected
		 * change of rq_reqmsg would race with threads traversing
		 * imp_replay_list in parallel.  See LU-3333.
		 * This is a band-aid at best; we really need to deal with
		 * this in the request enlarging code before unpacking,
		 * which is already there.
		 */
		if (req->rq_import)
			spin_lock(&req->rq_import->imp_lock);

		memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);

		kvfree(req->rq_reqbuf);
		req->rq_reqbuf = newbuf;
		req->rq_reqbuf_len = newbuf_size;
		req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf,
						PLAIN_PACK_MSG_OFF, 0);

		if (req->rq_import)
			spin_unlock(&req->rq_import->imp_lock);
	}

	_sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF,
				     newmsg_size);
	_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);

	req->rq_reqlen = newmsg_size;
	return 0;
}

/****************************************
 * service apis                         *
 ****************************************/

static struct ptlrpc_svc_ctx plain_svc_ctx = {
	.sc_refcount	= ATOMIC_INIT(1),
	.sc_policy	= &plain_policy,
};

static
int plain_accept(struct ptlrpc_request *req)
{
	struct lustre_msg *msg = req->rq_reqbuf;
	struct plain_header *phdr;
	int swabbed;

	LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
		SPTLRPC_POLICY_PLAIN);

	if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN) ||
	    SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
	    SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
		CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
		return SECSVC_DROP;
	}

	if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
		CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
		return SECSVC_DROP;
	}

	swabbed = ptlrpc_req_need_swab(req);

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
	if (!phdr) {
		CERROR("missing plain header\n");
		return -EPROTO;
	}

	if (phdr->ph_ver != 0) {
		CERROR("Invalid header version\n");
		return -EPROTO;
	}

	if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
		CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
		return -EPROTO;
	}

	req->rq_sp_from = phdr->ph_sp;
	req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg;

	if (phdr->ph_flags & PLAIN_FL_USER) {
		if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
					     swabbed)) {
			CERROR("Mal-formed user descriptor\n");
			return SECSVC_DROP;
		}

		req->rq_pack_udesc = 1;
		req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0);
	}

	if (phdr->ph_flags & PLAIN_FL_BULK) {
		if (plain_unpack_bsd(msg, swabbed))
			return SECSVC_DROP;

		req->rq_pack_bulk = 1;
	}

	req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
	req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF];

	req->rq_svc_ctx = &plain_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);

	return SECSVC_OK;
}

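/*
 * Allocate the reply state for a plain request.  The reply buffer is laid
 * out immediately after the ptlrpc_reply_state structure; a pre-allocated
 * reply state, when present, is reused and must already be large enough for
 * the fixed four-segment layout.
 */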
static
int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_reply_state *rs;
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rs_size = sizeof(*rs);

	LASSERT(msgsize % 8 == 0);

	buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
	buflens[PLAIN_PACK_MSG_OFF] = msgsize;

	if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write))
		buflens[PLAIN_PACK_BULK_OFF] = PLAIN_BSD_SIZE;

	rs_size += lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rs = req->rq_reply_state;

	if (rs) {
		/* pre-allocated */
		LASSERT(rs->rs_size >= rs_size);
	} else {
		rs = libcfs_kvzalloc(rs_size, GFP_NOFS);
		if (!rs)
			return -ENOMEM;

		rs->rs_size = rs_size;
	}

	rs->rs_svc_ctx = req->rq_svc_ctx;
	atomic_inc(&req->rq_svc_ctx->sc_refcount);
	rs->rs_repbuf = (struct lustre_msg *)(rs + 1);
	rs->rs_repbuf_len = rs_size - sizeof(*rs);

	lustre_init_msg_v2(rs->rs_repbuf, PLAIN_PACK_SEGMENTS, buflens, NULL);
	rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);

	req->rq_reply_state = rs;
	return 0;
}

static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
	atomic_dec(&rs->rs_svc_ctx->sc_refcount);

	if (!rs->rs_prealloc)
		kvfree(rs);
}

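/*
 * Finalize a plain reply: shrink the embedded message to its real length,
 * fill in the plain_header, and, for replies that are not packed as final
 * (early replies), store a CRC32 of the message in lm_cksum so the client
 * can verify it in plain_ctx_verify().
 */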
static
int plain_authorize(struct ptlrpc_request *req)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct lustre_msg_v2 *msg = rs->rs_repbuf;
	struct plain_header *phdr;
	int len;

	LASSERT(rs);
	LASSERT(msg);

	if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF])
		len = lustre_shrink_msg(msg, PLAIN_PACK_MSG_OFF,
					req->rq_replen, 1);
	else
		len = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);

	msg->lm_secflvr = req->rq_flvr.sf_rpc;

	phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, 0);
	phdr->ph_ver = 0;
	phdr->ph_flags = 0;
	phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg;

	if (req->rq_pack_bulk)
		phdr->ph_flags |= PLAIN_FL_BULK;

	rs->rs_repdata_len = len;

	if (likely(req->rq_packed_final)) {
		if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)
			req->rq_reply_off = plain_at_offset;
		else
			req->rq_reply_off = 0;
	} else {
		unsigned int hsize = 4;

		cfs_crypto_hash_digest(CFS_HASH_ALG_CRC32,
				       lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0),
				       lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF),
				       NULL, 0, (unsigned char *)&msg->lm_cksum, &hsize);
		req->rq_reply_off = 0;
	}

	return 0;
}

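/*
 * Server-side bulk write check: verify the checksum carried in the request's
 * bulk descriptor against the received pages, and report a failure to the
 * client through BSD_FL_ERR in the reply descriptor.
 */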
static
int plain_svc_unwrap_bulk(struct ptlrpc_request *req,
			  struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenr;
	int rc;

	LASSERT(req->rq_bulk_write);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenr = (struct plain_bulk_token *)bsdr->bsd_data;
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				    tokenr);
	if (rc) {
		bsdv->bsd_flags |= BSD_FL_ERR;
		CERROR("bulk write: server verify failed: %d\n", rc);
	}

	return rc;
}

static
int plain_svc_wrap_bulk(struct ptlrpc_request *req,
			struct ptlrpc_bulk_desc *desc)
{
	struct ptlrpc_reply_state *rs = req->rq_reply_state;
	struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
	struct plain_bulk_token *tokenv;
	int rc;

	LASSERT(req->rq_bulk_read);
	LASSERT(req->rq_pack_bulk);

	bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0);
	bsdv = lustre_msg_buf(rs->rs_repbuf, PLAIN_PACK_BULK_OFF, 0);
	tokenv = (struct plain_bulk_token *)bsdv->bsd_data;

	bsdv->bsd_version = 0;
	bsdv->bsd_type = SPTLRPC_BULK_DEFAULT;
	bsdv->bsd_svc = bsdr->bsd_svc;
	bsdv->bsd_flags = 0;

	if (bsdr->bsd_svc == SPTLRPC_BULK_SVC_NULL)
		return 0;

	rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
				      tokenv);
	if (rc) {
		CERROR("bulk read: server failed to compute checksum: %d\n",
		       rc);
	} else {
		if (OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
			corrupt_bulk_data(desc);
	}

	return rc;
}

static struct ptlrpc_ctx_ops plain_ctx_ops = {
	.refresh		= plain_ctx_refresh,
	.validate		= plain_ctx_validate,
	.sign			= plain_ctx_sign,
	.verify			= plain_ctx_verify,
	.wrap_bulk		= plain_cli_wrap_bulk,
	.unwrap_bulk		= plain_cli_unwrap_bulk,
};

static struct ptlrpc_sec_cops plain_sec_cops = {
	.create_sec		= plain_create_sec,
	.destroy_sec		= plain_destroy_sec,
	.kill_sec		= plain_kill_sec,
	.lookup_ctx		= plain_lookup_ctx,
	.release_ctx		= plain_release_ctx,
	.flush_ctx_cache	= plain_flush_ctx_cache,
	.alloc_reqbuf		= plain_alloc_reqbuf,
	.free_reqbuf		= plain_free_reqbuf,
	.alloc_repbuf		= plain_alloc_repbuf,
	.free_repbuf		= plain_free_repbuf,
	.enlarge_reqbuf		= plain_enlarge_reqbuf,
};

static struct ptlrpc_sec_sops plain_sec_sops = {
	.accept			= plain_accept,
	.alloc_rs		= plain_alloc_rs,
	.authorize		= plain_authorize,
	.free_rs		= plain_free_rs,
	.unwrap_bulk		= plain_svc_unwrap_bulk,
	.wrap_bulk		= plain_svc_wrap_bulk,
};

static struct ptlrpc_sec_policy plain_policy = {
	.sp_owner		= THIS_MODULE,
	.sp_name		= "plain",
	.sp_policy		= SPTLRPC_POLICY_PLAIN,
	.sp_cops		= &plain_sec_cops,
	.sp_sops		= &plain_sec_sops,
};

int sptlrpc_plain_init(void)
{
	__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
	int rc;

	buflens[PLAIN_PACK_MSG_OFF] = lustre_msg_early_size();
	plain_at_offset = lustre_msg_size_v2(PLAIN_PACK_SEGMENTS, buflens);

	rc = sptlrpc_register_policy(&plain_policy);
	if (rc)
		CERROR("failed to register: %d\n", rc);

	return rc;
}

void sptlrpc_plain_fini(void)
{
	int rc;

	rc = sptlrpc_unregister_policy(&plain_policy);
	if (rc)
		CERROR("cannot unregister: %d\n", rc);
}