/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/crypto.h>
#include <linux/key.h>

#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/obd_support.h"
#include "../include/lustre_net.h"
#include "../include/lustre_import.h"
#include "../include/lustre_dlm.h"
#include "../include/lustre_sec.h"

#include "ptlrpc_internal.h"

/***********************************************
 * policy registers                            *
 ***********************************************/

static rwlock_t policy_lock;
static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
	NULL,
};

int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(policy->sp_name);
	LASSERT(policy->sp_cops);
	LASSERT(policy->sp_sops);

	if (number >= SPTLRPC_POLICY_MAX)
		return -EINVAL;

	write_lock(&policy_lock);
	if (unlikely(policies[number])) {
		write_unlock(&policy_lock);
		return -EALREADY;
	}
	policies[number] = policy;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_register_policy);

int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
{
	__u16 number = policy->sp_policy;

	LASSERT(number < SPTLRPC_POLICY_MAX);

	write_lock(&policy_lock);
	if (unlikely(!policies[number])) {
		write_unlock(&policy_lock);
		CERROR("%s: already unregistered\n", policy->sp_name);
		return -EINVAL;
	}

	LASSERT(policies[number] == policy);
	policies[number] = NULL;
	write_unlock(&policy_lock);

	CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
	return 0;
}
EXPORT_SYMBOL(sptlrpc_unregister_policy);

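/*
 * Usage sketch (added note, illustrative only): a policy module such as
 * ptlrpc_gss is expected to register itself on load and unregister on
 * unload. Assuming a hypothetical policy descriptor "my_policy" with
 * sp_policy/sp_name/sp_cops/sp_sops filled in, the pairing would look
 * roughly like:
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return sptlrpc_register_policy(&my_policy);
 *	}
 *
 *	static void __exit my_policy_fini(void)
 *	{
 *		sptlrpc_unregister_policy(&my_policy);
 *	}
 */
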
static
struct ptlrpc_sec_policy *sptlrpc_wireflavor2policy(__u32 flavor)
{
	static DEFINE_MUTEX(load_mutex);
	static atomic_t loaded = ATOMIC_INIT(0);
	struct ptlrpc_sec_policy *policy;
	__u16 number = SPTLRPC_FLVR_POLICY(flavor);
	__u16 flag = 0;

	if (number >= SPTLRPC_POLICY_MAX)
		return NULL;

	while (1) {
		read_lock(&policy_lock);
		policy = policies[number];
		if (policy && !try_module_get(policy->sp_owner))
			policy = NULL;
		if (!policy)
			flag = atomic_read(&loaded);
		read_unlock(&policy_lock);

		if (policy || flag != 0 ||
		    number != SPTLRPC_POLICY_GSS)
			break;

		/* try to load gss module, once */
		mutex_lock(&load_mutex);
		if (atomic_read(&loaded) == 0) {
			if (request_module("ptlrpc_gss") == 0)
				CDEBUG(D_SEC,
				       "module ptlrpc_gss loaded on demand\n");
			else
				CERROR("Unable to load module ptlrpc_gss\n");

			atomic_set(&loaded, 1);
		}
		mutex_unlock(&load_mutex);
	}

	return policy;
}

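/*
 * Added note: a wire flavor (__u32) encodes several fields; the
 * SPTLRPC_FLVR_POLICY(), SPTLRPC_FLVR_MECH() and SPTLRPC_FLVR_SVC()
 * accessors used throughout this file extract the policy number,
 * mechanism and service level respectively (see lustre_sec.h for the
 * exact layout). For example, resolving SPTLRPC_FLVR_KRB5I lands in the
 * GSS policy slot and may load ptlrpc_gss on demand as above.
 */
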
__u32 sptlrpc_name2flavor_base(const char *name)
{
	if (!strcmp(name, "null"))
		return SPTLRPC_FLVR_NULL;
	if (!strcmp(name, "plain"))
		return SPTLRPC_FLVR_PLAIN;
	if (!strcmp(name, "krb5n"))
		return SPTLRPC_FLVR_KRB5N;
	if (!strcmp(name, "krb5a"))
		return SPTLRPC_FLVR_KRB5A;
	if (!strcmp(name, "krb5i"))
		return SPTLRPC_FLVR_KRB5I;
	if (!strcmp(name, "krb5p"))
		return SPTLRPC_FLVR_KRB5P;

	return SPTLRPC_FLVR_INVALID;
}
EXPORT_SYMBOL(sptlrpc_name2flavor_base);

const char *sptlrpc_flavor2name_base(__u32 flvr)
{
	__u32 base = SPTLRPC_FLVR_BASE(flvr);

	if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
		return "null";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
		return "plain";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
		return "krb5n";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
		return "krb5a";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
		return "krb5i";
	else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
		return "krb5p";

	CERROR("invalid wire flavor 0x%x\n", flvr);
	return "invalid";
}
EXPORT_SYMBOL(sptlrpc_flavor2name_base);

char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
			       char *buf, int bufsize)
{
	if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
		snprintf(buf, bufsize, "hash:%s",
			 sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
	else
		snprintf(buf, bufsize, "%s",
			 sptlrpc_flavor2name_base(sf->sf_rpc));

	buf[bufsize - 1] = '\0';
	return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);

char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
{
	strlcpy(buf, sptlrpc_flavor2name_base(sf->sf_rpc), bufsize);

	/*
	 * currently we don't support customized bulk specification for
	 * flavors other than plain
	 */
	if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
		char bspec[16];

		bspec[0] = '-';
		sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
		strlcat(buf, bspec, bufsize);
	}

	return buf;
}
EXPORT_SYMBOL(sptlrpc_flavor2name);

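/*
 * Example output (added note, illustrative): a krb5i flavor prints as
 * "krb5i", while a plain flavor with a bulk hash prints as
 * "plain-hash:<alg>", where <alg> is whatever sptlrpc_get_hash_name()
 * returns for sf->u_bulk.hash.hash_alg.
 */
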
static char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
{
	buf[0] = '\0';

	if (flags & PTLRPC_SEC_FL_REVERSE)
		strlcat(buf, "reverse,", bufsize);
	if (flags & PTLRPC_SEC_FL_ROOTONLY)
		strlcat(buf, "rootonly,", bufsize);
	if (flags & PTLRPC_SEC_FL_UDESC)
		strlcat(buf, "udesc,", bufsize);
	if (flags & PTLRPC_SEC_FL_BULK)
		strlcat(buf, "bulk,", bufsize);
	if (buf[0] == '\0')
		strlcat(buf, "-,", bufsize);

	return buf;
}

/**************************************************
 * client context APIs                            *
 **************************************************/

static
struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
{
	struct vfs_cred vcred;
	int create = 1, remove_dead = 1;

	LASSERT(sec);
	LASSERT(sec->ps_policy->sp_cops->lookup_ctx);

	if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
				     PTLRPC_SEC_FL_ROOTONLY)) {
		vcred.vc_uid = 0;
		vcred.vc_gid = 0;
		if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
			create = 0;
			remove_dead = 0;
		}
	} else {
		vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
		vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
	}

	return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
						   create, remove_dead);
}

struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
{
	atomic_inc(&ctx->cc_refcount);
	return ctx;
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_get);

void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
{
	struct ptlrpc_sec *sec = ctx->cc_sec;

	LASSERT(sec);
	LASSERT_ATOMIC_POS(&ctx->cc_refcount);

	if (!atomic_dec_and_test(&ctx->cc_refcount))
		return;

	sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
}
EXPORT_SYMBOL(sptlrpc_cli_ctx_put);

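/*
 * Reference discipline (added note): every context obtained via
 * sptlrpc_cli_ctx_get() or get_my_ctx() must eventually be released
 * with sptlrpc_cli_ctx_put().  A sketch of the typical pairing:
 *
 *	ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
 *	... use ctx ...
 *	sptlrpc_cli_ctx_put(ctx, 1);
 *
 * The final put hands the context back to the policy's release_ctx();
 * sync selects whether that release may sleep (see the semantics
 * documented at sptlrpc_req_put_ctx() below).
 */
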
static int import_sec_check_expire(struct obd_import *imp)
{
	int adapt = 0;

	spin_lock(&imp->imp_lock);
	if (imp->imp_sec_expire &&
	    imp->imp_sec_expire < ktime_get_real_seconds()) {
		adapt = 1;
		imp->imp_sec_expire = 0;
	}
	spin_unlock(&imp->imp_lock);

	if (!adapt)
		return 0;

	CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
	return sptlrpc_import_sec_adapt(imp, NULL, NULL);
}

/**
 * Get and validate the client side ptlrpc security facilities from
 * \a imp. There is a race condition on client reconnect when the import is
 * being destroyed while there are outstanding client-bound requests. In
 * this case do not output any error messages if import security is not
 * found.
 *
 * \param[in] imp obd import associated with client
 * \param[out] sec client side ptlrpc security
 *
 * \retval 0 if security retrieved successfully
 * \retval -ve errno if there was a problem
 */
static int import_sec_validate_get(struct obd_import *imp,
				   struct ptlrpc_sec **sec)
{
	int rc;

	if (unlikely(imp->imp_sec_expire)) {
		rc = import_sec_check_expire(imp);
		if (rc)
			return rc;
	}

	*sec = sptlrpc_import_sec_ref(imp);
	/* Only output an error when the import is still active */
	if (!*sec) {
		if (list_empty(&imp->imp_zombie_chain))
			CERROR("import %p (%s) with no sec\n",
			       imp, ptlrpc_import_state_name(imp->imp_state));
		return -EACCES;
	}

	if (unlikely((*sec)->ps_dying)) {
		CERROR("attempt to use dying sec %p\n", sec);
		sptlrpc_sec_put(*sec);
		return -EACCES;
	}

	return 0;
}

/**
 * Given a \a req, find or allocate an appropriate context for it.
 * \pre req->rq_cli_ctx == NULL.
 *
 * \retval 0 succeed, and req->rq_cli_ctx is set.
 * \retval -ve error number, and req->rq_cli_ctx == NULL.
 */
int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
{
	struct obd_import *imp = req->rq_import;
	struct ptlrpc_sec *sec;
	int rc;

	LASSERT(!req->rq_cli_ctx);
	LASSERT(imp);

	rc = import_sec_validate_get(imp, &sec);
	if (rc)
		return rc;

	req->rq_cli_ctx = get_my_ctx(sec);

	sptlrpc_sec_put(sec);

	if (!req->rq_cli_ctx) {
		CERROR("req %p: fail to get context\n", req);
		return -ENOMEM;
	}

	return 0;
}

/**
 * Drop the context for \a req.
 * \pre req->rq_cli_ctx != NULL.
 * \post req->rq_cli_ctx == NULL.
 *
 * If \a sync == 0, this function should return quickly without sleeping;
 * otherwise it might trigger and wait for the whole process of sending
 * a context-destroying RPC to the server.
 */
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
	LASSERT(req);
	LASSERT(req->rq_cli_ctx);

	/* request might be asked to release earlier while still
	 * in the context waiting list.
	 */
	if (!list_empty(&req->rq_ctx_chain)) {
		spin_lock(&req->rq_cli_ctx->cc_lock);
		list_del_init(&req->rq_ctx_chain);
		spin_unlock(&req->rq_cli_ctx->cc_lock);
	}

	sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
	req->rq_cli_ctx = NULL;
}

static
int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
			   struct ptlrpc_cli_ctx *oldctx,
			   struct ptlrpc_cli_ctx *newctx)
{
	struct sptlrpc_flavor old_flvr;
	char *reqmsg = NULL; /* to workaround old gcc */
	int reqmsg_size;
	int rc = 0;

	LASSERT(req->rq_reqmsg);
	LASSERT(req->rq_reqlen);
	LASSERT(req->rq_replen);

	CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n",
	       req,
	       oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
	       newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
	       oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
	       newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);

	/* save flavor */
	old_flvr = req->rq_flvr;

	/* save request message */
	reqmsg_size = req->rq_reqlen;
	if (reqmsg_size != 0) {
		reqmsg = libcfs_kvzalloc(reqmsg_size, GFP_NOFS);
		if (!reqmsg)
			return -ENOMEM;
		memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
	}

	/* release old req/rep buf */
	req->rq_cli_ctx = oldctx;
	sptlrpc_cli_free_reqbuf(req);
	sptlrpc_cli_free_repbuf(req);
	req->rq_cli_ctx = newctx;

	/* recalculate the flavor */
	sptlrpc_req_set_flavor(req, 0);

	/* alloc new request buffer
	 * we don't need to alloc reply buffer here, leave it to the
	 * rest of the ptlrpc procedure
	 */
	if (reqmsg_size != 0) {
		rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
		if (!rc) {
			LASSERT(req->rq_reqmsg);
			memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
		} else {
			CWARN("failed to alloc reqbuf: %d\n", rc);
			req->rq_flvr = old_flvr;
		}

		kvfree(reqmsg);
	}
	return rc;
}

/**
 * If the current context of \a req is dead somehow, e.g. we just switched
 * flavor and thus marked the original contexts dead, we'll find a new
 * context for it. If no switch is needed, \a req will end up with the
 * same context.
 *
 * \note a request must have a context, to keep other parts of code happy.
 * In any case of failure during the switching, we must restore the old one.
 */
static int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
	struct ptlrpc_cli_ctx *newctx;
	int rc;

	LASSERT(oldctx);

	sptlrpc_cli_ctx_get(oldctx);
	sptlrpc_req_put_ctx(req, 0);

	rc = sptlrpc_req_get_ctx(req);
	if (unlikely(rc)) {
		LASSERT(!req->rq_cli_ctx);

		/* restore old ctx */
		req->rq_cli_ctx = oldctx;
		return rc;
	}

	newctx = req->rq_cli_ctx;
	LASSERT(newctx);

	if (unlikely(newctx == oldctx &&
		     test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
		/*
		 * still get the old dead ctx, usually means system too busy
		 */
		CDEBUG(D_SEC,
		       "ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
		       newctx, newctx->cc_flags);

		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(MSEC_PER_SEC));
	} else {
		/*
		 * it's possible newctx == oldctx if we're switching
		 * subflavor with the same sec.
		 */
		rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
		if (rc) {
			/* restore old ctx */
			sptlrpc_req_put_ctx(req, 0);
			req->rq_cli_ctx = oldctx;
			return rc;
		}

		LASSERT(req->rq_cli_ctx == newctx);
	}

	sptlrpc_cli_ctx_put(oldctx, 1);
	return 0;
}

static
int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
{
	if (cli_ctx_is_refreshed(ctx))
		return 1;
	return 0;
}

static
int ctx_refresh_timeout(void *data)
{
	struct ptlrpc_request *req = data;
	int rc;

	/* conn_cnt is needed in expire_one_request */
	lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);

	rc = ptlrpc_expire_one_request(req, 1);
	/* if we started recovery, we should mark this ctx dead; otherwise,
	 * in case lgssd died, nobody would retire this ctx and subsequent
	 * connect attempts would keep finding the same ctx, causing a
	 * deadlock. there's an assumption that the expire time of the
	 * request should be later than the context refresh expire time.
	 */
	if (rc == 0)
		req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0);
	return rc;
}

static
void ctx_refresh_interrupt(void *data)
{
	struct ptlrpc_request *req = data;

	spin_lock(&req->rq_lock);
	req->rq_intr = 1;
	spin_unlock(&req->rq_lock);
}

static
void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
{
	spin_lock(&ctx->cc_lock);
	if (!list_empty(&req->rq_ctx_chain))
		list_del_init(&req->rq_ctx_chain);
	spin_unlock(&ctx->cc_lock);
}

/**
 * To refresh the context of \a req, if it's not up-to-date.
 * \param timeout
 * - < 0: don't wait
 * - = 0: wait until success or fatal error occurs
 * - > 0: timeout value (in seconds)
 *
 * The status of the context could be subject to be changed by other threads
 * at any time. We allow this race, but once we return with 0, the caller will
 * suppose it's up to date and keep using it until the owning rpc is done.
 *
 * \retval 0 only if the context is up to date.
 * \retval -ve error number.
 */
int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec *sec;
	struct l_wait_info lwi;
	int rc;

	LASSERT(ctx);

	if (req->rq_ctx_init || req->rq_ctx_fini)
		return 0;

	/*
	 * during the process a request's context might even change type
	 * (e.g. from gss ctx to null ctx), so each loop we need to re-check
	 * everything
	 */
again:
	rc = import_sec_validate_get(req->rq_import, &sec);
	if (rc)
		return rc;

	if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
		CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
		       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
		req_off_ctx_list(req, ctx);
		sptlrpc_req_replace_dead_ctx(req);
		ctx = req->rq_cli_ctx;
	}
	sptlrpc_sec_put(sec);

	if (cli_ctx_is_eternal(ctx))
		return 0;

	if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
		LASSERT(ctx->cc_ops->refresh);
		ctx->cc_ops->refresh(ctx);
	}
	LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);

	LASSERT(ctx->cc_ops->validate);
	if (ctx->cc_ops->validate(ctx) == 0) {
		req_off_ctx_list(req, ctx);
		return 0;
	}

	if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
		spin_lock(&req->rq_lock);
		req->rq_err = 1;
		spin_unlock(&req->rq_lock);
		req_off_ctx_list(req, ctx);
		return -EPERM;
	}

	/*
	 * There's a subtle issue for resending RPCs. Suppose the following
	 * situation:
	 * 1. the request was sent to the server.
	 * 2. recovery was kicked off; after it finished, the request was
	 *    marked as resent.
	 * 3. resend the request.
	 * 4. old reply from the server received; we accept and verify the
	 *    reply. this has to succeed, otherwise the error will be
	 *    visible to the application.
	 * 5. new reply from the server received, dropped by LNet.
	 *
	 * Note the xid of old & new request is the same. We can't simply
	 * change xid for the resent request because the server relies on
	 * it for reply reconstruction.
	 *
	 * Commonly the original context should be up to date because we
	 * have a nice expiry time; the server will keep its context because
	 * we at least hold a ref on the old context, which prevents the
	 * context-destroying RPC from being sent. So the server still can
	 * accept the request and finish the RPC. But if that's not the case:
	 * 1. If the server side context has been trimmed, a NO_CONTEXT will
	 *    be returned, and gss_cli_ctx_verify/unseal will switch to a new
	 *    context by force.
	 * 2. If the current context has never been refreshed, then we are
	 *    fine: we never really sent a request with the old context
	 *    before.
	 */
	if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
	    unlikely(req->rq_reqmsg) &&
	    lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
		req_off_ctx_list(req, ctx);
		return 0;
	}

	if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
		req_off_ctx_list(req, ctx);
		/*
		 * don't switch ctx if import was deactivated
		 */
		if (req->rq_import->imp_deactive) {
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			return -EINTR;
		}

		rc = sptlrpc_req_replace_dead_ctx(req);
		if (rc) {
			LASSERT(ctx == req->rq_cli_ctx);
			CERROR("req %p: failed to replace dead ctx %p: %d\n",
			       req, ctx, rc);
			spin_lock(&req->rq_lock);
			req->rq_err = 1;
			spin_unlock(&req->rq_lock);
			return rc;
		}

		ctx = req->rq_cli_ctx;
		goto again;
	}

	/*
	 * Now we're sure this context is during an upcall, add myself into
	 * the waiting list
	 */
	spin_lock(&ctx->cc_lock);
	if (list_empty(&req->rq_ctx_chain))
		list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
	spin_unlock(&ctx->cc_lock);

	if (timeout < 0)
		return -EWOULDBLOCK;

	/* Clear any flags that may be present from previous sends */
	LASSERT(req->rq_receiving_reply == 0);
	spin_lock(&req->rq_lock);
	req->rq_err = 0;
	req->rq_timedout = 0;
	req->rq_resend = 0;
	req->rq_restart = 0;
	spin_unlock(&req->rq_lock);

	lwi = LWI_TIMEOUT_INTR(msecs_to_jiffies(timeout * MSEC_PER_SEC),
			       ctx_refresh_timeout, ctx_refresh_interrupt,
			       req);
	rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);

	/*
	 * the following cases could lead us here:
	 * - successfully refreshed;
	 * - interrupted;
	 * - timed out, and we don't want to recover from the failure;
	 * - timed out, and woken up upon recovery finishing;
	 * - someone else marked this ctx dead by force;
	 * - someone invalidated the req and called ptlrpc_client_wake_req(),
	 *   e.g. ptlrpc_abort_inflight();
	 */
	if (!cli_ctx_is_refreshed(ctx)) {
		/* timed out or interrupted */
		req_off_ctx_list(req, ctx);

		LASSERT(rc != 0);
		return rc;
	}

	goto again;
}

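/*
 * Caller sketch (added note): the typical caller is the ptlrpc send
 * path, which refreshes the context before packing a request.  Passing
 * timeout == 0 blocks until the refresh succeeds or fails fatally,
 * while timeout < 0 returns -EWOULDBLOCK immediately if the context is
 * still being refreshed, e.g.:
 *
 *	rc = sptlrpc_req_refresh_ctx(req, 0);
 *	if (rc)
 *		goto fail;
 */
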
/**
 * Initialize flavor settings for \a req, according to \a opcode.
 *
 * \note this could be called in two situations:
 * - new request from ptlrpc_pre_req(), with proper @opcode
 * - old request which changed ctx in the middle, with @opcode == 0
 */
void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
{
	struct ptlrpc_sec *sec;

	LASSERT(req->rq_import);
	LASSERT(req->rq_cli_ctx);
	LASSERT(req->rq_cli_ctx->cc_sec);
	LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);

	/* special security flags according to opcode */
	switch (opcode) {
	case OST_READ:
	case MDS_READPAGE:
	case MGS_CONFIG_READ:
	case OBD_IDX_READ:
		req->rq_bulk_read = 1;
		break;
	case OST_WRITE:
	case MDS_WRITEPAGE:
		req->rq_bulk_write = 1;
		break;
	case SEC_CTX_INIT:
		req->rq_ctx_init = 1;
		break;
	case SEC_CTX_FINI:
		req->rq_ctx_fini = 1;
		break;
	case 0:
		/* init/fini rpc won't be resent, so can't be here */
		LASSERT(req->rq_ctx_init == 0);
		LASSERT(req->rq_ctx_fini == 0);

		/* cleanup flags, which should be recalculated */
		req->rq_pack_udesc = 0;
		req->rq_pack_bulk = 0;
		break;
	}

	sec = req->rq_cli_ctx->cc_sec;

	spin_lock(&sec->ps_lock);
	req->rq_flvr = sec->ps_flvr;
	spin_unlock(&sec->ps_lock);

	/* force SVC_NULL for context initiation rpc, SVC_INTG for context
	 * destruction rpc
	 */
	if (unlikely(req->rq_ctx_init))
		flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
	else if (unlikely(req->rq_ctx_fini))
		flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);

	/* user descriptor flag, null security can't do it anyway */
	if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
	    (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
		req->rq_pack_udesc = 1;

	/* bulk security flag */
	if ((req->rq_bulk_read || req->rq_bulk_write) &&
	    sptlrpc_flavor_has_bulk(&req->rq_flvr))
		req->rq_pack_bulk = 1;
}

void sptlrpc_request_out_callback(struct ptlrpc_request *req)
{
	if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
		return;

	LASSERT(req->rq_clrbuf);
	if (req->rq_pool || !req->rq_reqbuf)
		return;

	kfree(req->rq_reqbuf);
	req->rq_reqbuf = NULL;
	req->rq_reqbuf_len = 0;
}

/**
 * Given an import \a imp, check whether the current user has a valid context
 * or not. We may create a new context and try to refresh it, retrying
 * repeatedly in case of non-fatal errors. Return 0 means success.
 */
int sptlrpc_import_check_ctx(struct obd_import *imp)
{
	struct ptlrpc_sec *sec;
	struct ptlrpc_cli_ctx *ctx;
	struct ptlrpc_request *req = NULL;
	int rc;

	might_sleep();

	sec = sptlrpc_import_sec_ref(imp);
	ctx = get_my_ctx(sec);
	sptlrpc_sec_put(sec);

	if (!ctx)
		return -ENOMEM;

	if (cli_ctx_is_eternal(ctx) ||
	    ctx->cc_ops->validate(ctx) == 0) {
		sptlrpc_cli_ctx_put(ctx, 1);
		return 0;
	}

	if (cli_ctx_is_error(ctx)) {
		sptlrpc_cli_ctx_put(ctx, 1);
		return -EACCES;
	}

	req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!req)
		return -ENOMEM;

	ptlrpc_cli_req_init(req);
	atomic_set(&req->rq_refcount, 10000);

	req->rq_import = imp;
	req->rq_flvr = sec->ps_flvr;
	req->rq_cli_ctx = ctx;

	rc = sptlrpc_req_refresh_ctx(req, 0);
	LASSERT(list_empty(&req->rq_ctx_chain));
	sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
	ptlrpc_request_cache_free(req);

	return rc;
}

/**
 * Used by ptlrpc client, to perform the pre-defined security transformation
 * upon the request message of \a req. After this function is called,
 * req->rq_reqmsg is still accessible as clear text.
 */
int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	int rc = 0;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(req->rq_reqbuf || req->rq_clrbuf);

	/* we wrap bulk request here because now we can be sure
	 * the context is uptodate.
	 */
	if (req->rq_bulk) {
		rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
		if (rc)
			return rc;
	}

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(ctx->cc_ops->sign);
		rc = ctx->cc_ops->sign(ctx, req);
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(ctx->cc_ops->seal);
		rc = ctx->cc_ops->seal(ctx, req);
		break;
	default:
		LBUG();
	}

	if (rc == 0) {
		LASSERT(req->rq_reqdata_len);
		LASSERT(req->rq_reqdata_len % 8 == 0);
		LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
	}

	return rc;
}

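/*
 * Added note: the service levels chosen above map to two context
 * operations.  SVC_NULL, SVC_AUTH and SVC_INTG keep the message in
 * clear text and go through cc_ops->sign(), while SVC_PRIV goes through
 * cc_ops->seal() and protects the whole message.  do_cli_unwrap_reply()
 * below mirrors this split with cc_ops->verify()/cc_ops->unseal().
 */
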
static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	int rc;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(req->rq_repbuf);
	LASSERT(req->rq_repdata);
	LASSERT(!req->rq_repmsg);

	req->rq_rep_swab_mask = 0;

	rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
	switch (rc) {
	case 1:
		lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
	case 0:
		break;
	default:
		CERROR("failed unpack reply: x%llu\n", req->rq_xid);
		return -EPROTO;
	}

	if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
		CERROR("replied data length %d too small\n",
		       req->rq_repdata_len);
		return -EPROTO;
	}

	if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
	    SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
		CERROR("reply policy %u doesn't match request policy %u\n",
		       SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
		       SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
		return -EPROTO;
	}

	switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
	case SPTLRPC_SVC_NULL:
	case SPTLRPC_SVC_AUTH:
	case SPTLRPC_SVC_INTG:
		LASSERT(ctx->cc_ops->verify);
		rc = ctx->cc_ops->verify(ctx, req);
		break;
	case SPTLRPC_SVC_PRIV:
		LASSERT(ctx->cc_ops->unseal);
		rc = ctx->cc_ops->unseal(ctx, req);
		break;
	default:
		LBUG();
	}
	LASSERT(rc || req->rq_repmsg || req->rq_resend);

	if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
	    !req->rq_ctx_init)
		req->rq_rep_swab_mask = 0;
	return rc;
}

/**
 * Used by ptlrpc client, to perform security transformation upon the reply
 * message of \a req. After returning successfully, req->rq_repmsg points to
 * the reply message in clear text.
 *
 * \pre the reply buffer should have been un-posted from LNet, so nothing is
 * going to change.
 */
int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
{
	LASSERT(req->rq_repbuf);
	LASSERT(!req->rq_repdata);
	LASSERT(!req->rq_repmsg);
	LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);

	if (req->rq_reply_off == 0 &&
	    (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
		CERROR("real reply with offset 0\n");
		return -EPROTO;
	}

	if (req->rq_reply_off % 8 != 0) {
		CERROR("reply at odd offset %u\n", req->rq_reply_off);
		return -EPROTO;
	}

	req->rq_repdata = (struct lustre_msg *)
			  (req->rq_repbuf + req->rq_reply_off);
	req->rq_repdata_len = req->rq_nob_received;

	return do_cli_unwrap_reply(req);
}

/**
 * Used by ptlrpc client, to perform security transformation upon the early
 * reply message of \a req. We expect the rq_reply_off is 0, and
 * rq_nob_received is the early reply size.
 *
 * Because the receive buffer might be still posted, the reply data might be
 * changed at any time, whether or not we're holding rq_lock. For this reason
 * we allocate a separate ptlrpc_request and reply buffer for early reply
 * processing.
 *
 * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
 * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
 * \a *req_ret to release it.
 * \retval -ve error number, and \a req_ret will not be set.
 */
int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
				   struct ptlrpc_request **req_ret)
{
	struct ptlrpc_request *early_req;
	char *early_buf;
	int early_bufsz, early_size;
	int rc;

	early_req = ptlrpc_request_cache_alloc(GFP_NOFS);
	if (!early_req)
		return -ENOMEM;

	ptlrpc_cli_req_init(early_req);

	early_size = req->rq_nob_received;
	early_bufsz = size_roundup_power2(early_size);
	early_buf = libcfs_kvzalloc(early_bufsz, GFP_NOFS);
	if (!early_buf) {
		rc = -ENOMEM;
		goto err_req;
	}

	/* sanity checks and copy data out, do it inside spinlock */
	spin_lock(&req->rq_lock);

	if (req->rq_replied) {
		spin_unlock(&req->rq_lock);
		rc = -EALREADY;
		goto err_buf;
	}

	LASSERT(req->rq_repbuf);
	LASSERT(!req->rq_repdata);
	LASSERT(!req->rq_repmsg);

	if (req->rq_reply_off != 0) {
		CERROR("early reply with offset %u\n", req->rq_reply_off);
		spin_unlock(&req->rq_lock);
		rc = -EPROTO;
		goto err_buf;
	}

	if (req->rq_nob_received != early_size) {
		/* even if another early reply arrived, the size should
		 * be the same
		 */
		CERROR("data size has changed from %u to %u\n",
		       early_size, req->rq_nob_received);
		spin_unlock(&req->rq_lock);
		rc = -EINVAL;
		goto err_buf;
	}

	if (req->rq_nob_received < sizeof(struct lustre_msg)) {
		CERROR("early reply length %d too small\n",
		       req->rq_nob_received);
		spin_unlock(&req->rq_lock);
		rc = -EALREADY;
		goto err_buf;
	}

	memcpy(early_buf, req->rq_repbuf, early_size);
	spin_unlock(&req->rq_lock);

	early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
	early_req->rq_flvr = req->rq_flvr;
	early_req->rq_repbuf = early_buf;
	early_req->rq_repbuf_len = early_bufsz;
	early_req->rq_repdata = (struct lustre_msg *)early_buf;
	early_req->rq_repdata_len = early_size;
	early_req->rq_early = 1;
	early_req->rq_reqmsg = req->rq_reqmsg;

	rc = do_cli_unwrap_reply(early_req);
	if (rc) {
		DEBUG_REQ(D_ADAPTTO, early_req,
			  "error %d unwrap early reply", rc);
		goto err_ctx;
	}

	LASSERT(early_req->rq_repmsg);
	*req_ret = early_req;
	return 0;

err_ctx:
	sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
err_buf:
	kvfree(early_buf);
err_req:
	ptlrpc_request_cache_free(early_req);
	return rc;
}

/**
 * Used by ptlrpc client, to release a processed early reply \a early_req.
 *
 * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
 */
void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
{
	LASSERT(early_req->rq_repbuf);
	LASSERT(early_req->rq_repdata);
	LASSERT(early_req->rq_repmsg);

	sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
	kvfree(early_req->rq_repbuf);
	ptlrpc_request_cache_free(early_req);
}

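/*
 * Pairing sketch (added note): every successful
 * sptlrpc_cli_unwrap_early_reply() must be matched by a
 * sptlrpc_cli_finish_early_reply() on the duplicated request, e.g.:
 *
 *	struct ptlrpc_request *early_req;
 *
 *	rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *	if (rc == 0) {
 *		... read early_req->rq_repmsg ...
 *		sptlrpc_cli_finish_early_reply(early_req);
 *	}
 */
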
/**************************************************
 * sec ID                                         *
 **************************************************/

/*
 * "fixed" sec (e.g. null) use sec_id < 0
 */
static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);

int sptlrpc_get_next_secid(void)
{
	return atomic_inc_return(&sptlrpc_sec_id);
}
EXPORT_SYMBOL(sptlrpc_get_next_secid);

/**************************************************
 * client side high-level security APIs           *
 **************************************************/

static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
				   int grace, int force)
{
	struct ptlrpc_sec_policy *policy = sec->ps_policy;

	LASSERT(policy->sp_cops);
	LASSERT(policy->sp_cops->flush_ctx_cache);

	return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
}

static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
{
	struct ptlrpc_sec_policy *policy = sec->ps_policy;

	LASSERT_ATOMIC_ZERO(&sec->ps_refcount);
	LASSERT_ATOMIC_ZERO(&sec->ps_nctx);
	LASSERT(policy->sp_cops->destroy_sec);

	CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);

	policy->sp_cops->destroy_sec(sec);
	sptlrpc_policy_put(policy);
}

static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
{
	LASSERT_ATOMIC_POS(&sec->ps_refcount);

	if (sec->ps_policy->sp_cops->kill_sec) {
		sec->ps_policy->sp_cops->kill_sec(sec);

		sec_cop_flush_ctx_cache(sec, -1, 1, 1);
	}
}

static struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
{
	if (sec)
		atomic_inc(&sec->ps_refcount);

	return sec;
}

void sptlrpc_sec_put(struct ptlrpc_sec *sec)
{
	if (sec) {
		LASSERT_ATOMIC_POS(&sec->ps_refcount);

		if (atomic_dec_and_test(&sec->ps_refcount)) {
			sptlrpc_gc_del_sec(sec);
			sec_cop_destroy_sec(sec);
		}
	}
}
EXPORT_SYMBOL(sptlrpc_sec_put);

/*
 * policy module is responsible for taking a reference of the import
 */
static
struct ptlrpc_sec *sptlrpc_sec_create(struct obd_import *imp,
				      struct ptlrpc_svc_ctx *svc_ctx,
				      struct sptlrpc_flavor *sf,
				      enum lustre_sec_part sp)
{
	struct ptlrpc_sec_policy *policy;
	struct ptlrpc_sec *sec;
	char str[32];

	if (svc_ctx) {
		LASSERT(imp->imp_dlm_fake == 1);

		CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
		       imp->imp_obd->obd_type->typ_name,
		       imp->imp_obd->obd_name,
		       sptlrpc_flavor2name(sf, str, sizeof(str)));

		policy = sptlrpc_policy_get(svc_ctx->sc_policy);
		sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
	} else {
		LASSERT(imp->imp_dlm_fake == 0);

		CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
		       imp->imp_obd->obd_type->typ_name,
		       imp->imp_obd->obd_name,
		       sptlrpc_flavor2name(sf, str, sizeof(str)));

		policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
		if (!policy) {
			CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
			return NULL;
		}
	}

	sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
	if (sec) {
		atomic_inc(&sec->ps_refcount);

		sec->ps_part = sp;

		if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
			sptlrpc_gc_add_sec(sec);
	} else {
		sptlrpc_policy_put(policy);
	}

	return sec;
}

struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
{
	struct ptlrpc_sec *sec;

	spin_lock(&imp->imp_lock);
	sec = sptlrpc_sec_get(imp->imp_sec);
	spin_unlock(&imp->imp_lock);

	return sec;
}
EXPORT_SYMBOL(sptlrpc_import_sec_ref);

static void sptlrpc_import_sec_install(struct obd_import *imp,
				       struct ptlrpc_sec *sec)
{
	struct ptlrpc_sec *old_sec;

	LASSERT_ATOMIC_POS(&sec->ps_refcount);

	spin_lock(&imp->imp_lock);
	old_sec = imp->imp_sec;
	imp->imp_sec = sec;
	spin_unlock(&imp->imp_lock);

	if (old_sec) {
		sptlrpc_sec_kill(old_sec);

		/* balance the ref taken by this import */
		sptlrpc_sec_put(old_sec);
	}
}

static inline
int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
{
	return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
}

static inline
void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
{
	*dst = *src;
}

static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
					     struct ptlrpc_sec *sec,
					     struct sptlrpc_flavor *sf)
{
	char str1[32], str2[32];

	if (sec->ps_flvr.sf_flags != sf->sf_flags)
		CDEBUG(D_SEC, "changing sec flags: %s -> %s\n",
		       sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
					    str1, sizeof(str1)),
		       sptlrpc_secflags2str(sf->sf_flags,
					    str2, sizeof(str2)));

	spin_lock(&sec->ps_lock);
	flavor_copy(&sec->ps_flvr, sf);
	spin_unlock(&sec->ps_lock);
}

/**
 * To get an appropriate ptlrpc_sec for the \a imp, according to the current
 * configuration. Upon being called, imp->imp_sec may or may not be NULL.
 *
 * - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
 * - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
 */
int sptlrpc_import_sec_adapt(struct obd_import *imp,
			     struct ptlrpc_svc_ctx *svc_ctx,
			     struct sptlrpc_flavor *flvr)
{
	struct ptlrpc_connection *conn;
	struct sptlrpc_flavor sf;
	struct ptlrpc_sec *sec, *newsec;
	enum lustre_sec_part sp;
	char str[24];
	int rc = 0;

	might_sleep();

	if (!imp)
		return 0;

	conn = imp->imp_connection;

	if (!svc_ctx) {
		struct client_obd *cliobd = &imp->imp_obd->u.cli;
		/*
		 * normal import, determine flavor from rule set, except
		 * for mgc the flavor is predetermined.
		 */
		if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
			sf = cliobd->cl_flvr_mgc;
		else
			sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
						   cliobd->cl_sp_to,
						   &cliobd->cl_target_uuid,
						   conn->c_self, &sf);

		sp = imp->imp_obd->u.cli.cl_sp_me;
	} else {
		/* reverse import, determine flavor from incoming request */
		sf = *flvr;

		if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
			sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
				      PTLRPC_SEC_FL_ROOTONLY;

		sp = sptlrpc_target_sec_part(imp->imp_obd);
	}

	sec = sptlrpc_import_sec_ref(imp);
	if (sec) {
		char str2[24];

		if (flavor_equal(&sf, &sec->ps_flvr))
			goto out;

		CDEBUG(D_SEC, "import %s->%s: changing flavor %s -> %s\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid),
		       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
		       sptlrpc_flavor2name(&sf, str2, sizeof(str2)));

		if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
		    SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
		    SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
		    SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
			sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
			goto out;
		}
	} else if (SPTLRPC_FLVR_BASE(sf.sf_rpc) !=
		   SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL)) {
		CDEBUG(D_SEC, "import %s->%s netid %x: select flavor %s\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid),
		       LNET_NIDNET(conn->c_self),
		       sptlrpc_flavor2name(&sf, str, sizeof(str)));
	}

	mutex_lock(&imp->imp_sec_mutex);

	newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
	if (newsec) {
		sptlrpc_import_sec_install(imp, newsec);
	} else {
		CERROR("import %s->%s: failed to create new sec\n",
		       imp->imp_obd->obd_name,
		       obd_uuid2str(&conn->c_remote_uuid));
		rc = -EPERM;
	}

	mutex_unlock(&imp->imp_sec_mutex);
out:
	sptlrpc_sec_put(sec);
	return rc;
}

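/*
 * Added note: sec_adapt is the single entry point for (re)negotiating an
 * import's security.  If only the flags change within the same
 * policy/mechanism pair, the flavor is patched in place via
 * sptlrpc_import_sec_adapt_inplace(); otherwise a whole new ptlrpc_sec
 * is created under imp_sec_mutex and swapped in by
 * sptlrpc_import_sec_install(), which kills and releases the old one.
 */
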
void sptlrpc_import_sec_put(struct obd_import *imp)
{
	if (imp->imp_sec) {
		sptlrpc_sec_kill(imp->imp_sec);

		sptlrpc_sec_put(imp->imp_sec);
		imp->imp_sec = NULL;
	}
}

static void import_flush_ctx_common(struct obd_import *imp,
				    uid_t uid, int grace, int force)
{
	struct ptlrpc_sec *sec;

	if (!imp)
		return;

	sec = sptlrpc_import_sec_ref(imp);
	if (!sec)
		return;

	sec_cop_flush_ctx_cache(sec, uid, grace, force);
	sptlrpc_sec_put(sec);
}

void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
	import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
				1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);

void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
{
	import_flush_ctx_common(imp, -1, 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);

/**
 * Used by ptlrpc client to allocate request buffer of \a req. Upon returning
 * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
 */
int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;
	int rc;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);
	LASSERT(!req->rq_reqmsg);
	LASSERT_ATOMIC_POS(&ctx->cc_refcount);

	policy = ctx->cc_sec->ps_policy;
	rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
	if (!rc) {
		LASSERT(req->rq_reqmsg);
		LASSERT(req->rq_reqbuf || req->rq_clrbuf);

		/* zeroing preallocated buffer */
		if (req->rq_pool)
			memset(req->rq_reqmsg, 0, msgsize);
	}

	return rc;
}

/**
 * Used by ptlrpc client to free request buffer of \a req. After this
 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
 */
void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);
	LASSERT_ATOMIC_POS(&ctx->cc_refcount);

	if (!req->rq_reqbuf && !req->rq_clrbuf)
		return;

	policy = ctx->cc_sec->ps_policy;
	policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
	req->rq_reqmsg = NULL;
}

/*
 * NOTE caller must guarantee the buffer size is enough for the enlargement
 */
void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
				  int segment, int newsize)
{
	void *src, *dst;
	int oldsize, oldmsg_size, movesize;

	LASSERT(segment < msg->lm_bufcount);
	LASSERT(msg->lm_buflens[segment] <= newsize);

	if (msg->lm_buflens[segment] == newsize)
		return;

	/* nothing to do if we are enlarging the last segment */
	if (segment == msg->lm_bufcount - 1) {
		msg->lm_buflens[segment] = newsize;
		return;
	}

	oldsize = msg->lm_buflens[segment];

	src = lustre_msg_buf(msg, segment + 1, 0);
	msg->lm_buflens[segment] = newsize;
	dst = lustre_msg_buf(msg, segment + 1, 0);
	msg->lm_buflens[segment] = oldsize;

	/* move from segment + 1 to end segment */
	LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
	oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
	movesize = oldmsg_size - ((unsigned long)src - (unsigned long)msg);
	LASSERT(movesize >= 0);

	if (movesize)
		memmove(dst, src, movesize);

	/* note we don't clear the areas where old data live, not secret */

	/* finally set new segment size */
	msg->lm_buflens[segment] = newsize;
}
EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);

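/*
 * Worked example (added note): with segment lengths {8, 16, 24} and
 * segment 1 enlarged from 16 to 32 bytes, dst ends up 16 bytes past
 * src, so the trailing 24 bytes (segment 2) are memmove()d 16 bytes
 * forward and lm_buflens[1] is finally set to 32.  (All offsets are
 * subject to the 8-byte rounding applied by lustre_msg_buf().)
 */
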
/**
 * Used by ptlrpc client to enlarge the \a segment of request message pointed
 * to by req->rq_reqmsg to size \a newsize; all previously filled-in data will
 * be preserved after the enlargement. This must be called after the original
 * request buffer has been allocated.
 *
 * \note after this is called, rq_reqmsg and rq_reqlen might have been changed,
 * so the caller should refresh its local pointers if needed.
 */
int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
			       int segment, int newsize)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_cops *cops;
	struct lustre_msg *msg = req->rq_reqmsg;

	LASSERT(ctx);
	LASSERT(msg);
	LASSERT(msg->lm_bufcount > segment);
	LASSERT(msg->lm_buflens[segment] <= newsize);

	if (msg->lm_buflens[segment] == newsize)
		return 0;

	cops = ctx->cc_sec->ps_policy->sp_cops;
	LASSERT(cops->enlarge_reqbuf);
	return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
}
EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);

/**
 * Used by ptlrpc client to allocate reply buffer of \a req.
 *
 * \note After this, req->rq_repmsg is still not accessible.
 */
int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
{
	struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
	struct ptlrpc_sec_policy *policy;

	LASSERT(ctx);
	LASSERT(ctx->cc_sec);
	LASSERT(ctx->cc_sec->ps_policy);

	if (req->rq_repbuf)
		return 0;

	policy = ctx->cc_sec->ps_policy;
	return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
}

1634/**
 1635 * Used by ptlrpc client to free the reply buffer of \a req. After this,
 1636 * req->rq_repmsg is set to NULL and must not be accessed anymore.
1637 */
1638void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1639{
1640 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1641 struct ptlrpc_sec_policy *policy;
Peng Taod7e09d02013-05-02 16:46:55 +08001642
1643 LASSERT(ctx);
1644 LASSERT(ctx->cc_sec);
1645 LASSERT(ctx->cc_sec->ps_policy);
1646 LASSERT_ATOMIC_POS(&ctx->cc_refcount);
1647
Oleg Drokin8b382082016-02-16 00:46:58 -05001648 if (!req->rq_repbuf)
Peng Taod7e09d02013-05-02 16:46:55 +08001649 return;
1650 LASSERT(req->rq_repbuf_len);
1651
1652 policy = ctx->cc_sec->ps_policy;
1653 policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1654 req->rq_repmsg = NULL;
Peng Taod7e09d02013-05-02 16:46:55 +08001655}
1656
Shraddha Barkebe23ce12015-10-05 05:32:39 +05301657static int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1658 struct ptlrpc_svc_ctx *ctx)
Peng Taod7e09d02013-05-02 16:46:55 +08001659{
1660 struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1661
1662 if (!policy->sp_sops->install_rctx)
1663 return 0;
1664 return policy->sp_sops->install_rctx(imp, ctx);
1665}
1666
1667/****************************************
1668 * server side security *
1669 ****************************************/
1670
1671static int flavor_allowed(struct sptlrpc_flavor *exp,
1672 struct ptlrpc_request *req)
1673{
1674 struct sptlrpc_flavor *flvr = &req->rq_flvr;
1675
1676 if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1677 return 1;
1678
1679 if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1680 SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1681 SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1682 SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1683 return 1;
1684
1685 return 0;
1686}
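/*
 * Editor's sketch (not in the original source): the special case above only
 * requires the policy and mechanism parts of the wire flavor to match for a
 * ctx init/fini RPC. Expressed as a hypothetical helper built from the same
 * macros:
 *
 *	static bool same_policy_and_mech(__u32 a, __u32 b)
 *	{
 *		return SPTLRPC_FLVR_POLICY(a) == SPTLRPC_FLVR_POLICY(b) &&
 *		       SPTLRPC_FLVR_MECH(a) == SPTLRPC_FLVR_MECH(b);
 *	}
 */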
1687
1688#define EXP_FLVR_UPDATE_EXPIRE (OBD_TIMEOUT_DEFAULT + 10)
1689
1690/**
 1691 * Given an export \a exp, check whether the flavor of the incoming \a req
 1692 * is allowed by the export. The main logic takes care of gracefully
 1693 * handling flavor configuration changes. Returns 0 on success.
1694 */
1695int sptlrpc_target_export_check(struct obd_export *exp,
1696 struct ptlrpc_request *req)
1697{
Chris Hannad0bfef32015-06-03 10:28:26 -04001698 struct sptlrpc_flavor flavor;
Peng Taod7e09d02013-05-02 16:46:55 +08001699
Oleg Drokin8b382082016-02-16 00:46:58 -05001700 if (!exp)
Peng Taod7e09d02013-05-02 16:46:55 +08001701 return 0;
1702
1703 /* client side export has no imp_reverse, skip
Oleg Drokindadfcda2016-02-24 22:00:38 -05001704	 * FIXME maybe we should check the flavor here as well???
1705 */
Oleg Drokin8b382082016-02-16 00:46:58 -05001706 if (!exp->exp_imp_reverse)
Peng Taod7e09d02013-05-02 16:46:55 +08001707 return 0;
1708
1709 /* don't care about ctx fini rpc */
1710 if (req->rq_ctx_fini)
1711 return 0;
1712
1713 spin_lock(&exp->exp_lock);
1714
 1715	/* if the flavor just changed (exp->exp_flvr_changed != 0), we wait for
 1716	 * the first req with the new flavor, then treat it as the current
 1717	 * flavor and adapt the reverse sec according to it.
 1718	 * note the first rpc with the new flavor might not carry a root ctx,
Oleg Drokindadfcda2016-02-24 22:00:38 -05001719	 * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1.
1720 */
Peng Taod7e09d02013-05-02 16:46:55 +08001721 if (unlikely(exp->exp_flvr_changed) &&
1722 flavor_allowed(&exp->exp_flvr_old[1], req)) {
 1723		/* mark the new flavor as "current", and the old ones as
Oleg Drokindadfcda2016-02-24 22:00:38 -05001724 * about-to-expire
1725 */
Peng Taod7e09d02013-05-02 16:46:55 +08001726 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1727 exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1728 flavor = exp->exp_flvr_old[1];
1729 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1730 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1731 exp->exp_flvr_old[0] = exp->exp_flvr;
Arnd Bergmann986ef132015-09-27 16:45:28 -04001732 exp->exp_flvr_expire[0] = ktime_get_real_seconds() +
Peng Taod7e09d02013-05-02 16:46:55 +08001733 EXP_FLVR_UPDATE_EXPIRE;
1734 exp->exp_flvr = flavor;
1735
1736 /* flavor change finished */
1737 exp->exp_flvr_changed = 0;
1738 LASSERT(exp->exp_flvr_adapt == 1);
1739
 1740		/* if it's gss, we are only interested in root ctx init */
1741 if (req->rq_auth_gss &&
1742 !(req->rq_ctx_init &&
1743 (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1744 req->rq_auth_usr_ost))) {
1745 spin_unlock(&exp->exp_lock);
1746 CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1747 req->rq_auth_gss, req->rq_ctx_init,
1748 req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1749 req->rq_auth_usr_ost);
1750 return 0;
1751 }
1752
1753 exp->exp_flvr_adapt = 0;
1754 spin_unlock(&exp->exp_lock);
1755
1756 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1757 req->rq_svc_ctx, &flavor);
1758 }
1759
 1760	/* if it equals the current flavor, we accept it, but still need to
Oleg Drokindadfcda2016-02-24 22:00:38 -05001761	 * deal with the reverse sec/ctx
1762 */
Peng Taod7e09d02013-05-02 16:46:55 +08001763 if (likely(flavor_allowed(&exp->exp_flvr, req))) {
 1764		/* most cases should return here; we are only interested in
Oleg Drokindadfcda2016-02-24 22:00:38 -05001765 * gss root ctx init
1766 */
Peng Taod7e09d02013-05-02 16:46:55 +08001767 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1768 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1769 !req->rq_auth_usr_ost)) {
1770 spin_unlock(&exp->exp_lock);
1771 return 0;
1772 }
1773
 1774		/* if the flavor just changed, we should not proceed; just leave
 1775		 * it, the current flavor will be discovered and replaced
Oleg Drokindadfcda2016-02-24 22:00:38 -05001776		 * shortly, and let _this_ rpc pass through
1777 */
Peng Taod7e09d02013-05-02 16:46:55 +08001778 if (exp->exp_flvr_changed) {
1779 LASSERT(exp->exp_flvr_adapt);
1780 spin_unlock(&exp->exp_lock);
1781 return 0;
1782 }
1783
1784 if (exp->exp_flvr_adapt) {
1785 exp->exp_flvr_adapt = 0;
1786 CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1787 exp, exp->exp_flvr.sf_rpc,
1788 exp->exp_flvr_old[0].sf_rpc,
1789 exp->exp_flvr_old[1].sf_rpc);
1790 flavor = exp->exp_flvr;
1791 spin_unlock(&exp->exp_lock);
1792
1793 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1794 req->rq_svc_ctx,
1795 &flavor);
1796 } else {
Joe Perches2d00bd12014-11-23 11:28:50 -08001797 CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, install rvs ctx\n",
1798 exp, exp->exp_flvr.sf_rpc,
Peng Taod7e09d02013-05-02 16:46:55 +08001799 exp->exp_flvr_old[0].sf_rpc,
1800 exp->exp_flvr_old[1].sf_rpc);
1801 spin_unlock(&exp->exp_lock);
1802
1803 return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1804 req->rq_svc_ctx);
1805 }
1806 }
1807
1808 if (exp->exp_flvr_expire[0]) {
Arnd Bergmann986ef132015-09-27 16:45:28 -04001809 if (exp->exp_flvr_expire[0] >= ktime_get_real_seconds()) {
Peng Taod7e09d02013-05-02 16:46:55 +08001810 if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
Arnd Bergmann986ef132015-09-27 16:45:28 -04001811 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the middle one (%lld)\n", exp,
Peng Taod7e09d02013-05-02 16:46:55 +08001812 exp->exp_flvr.sf_rpc,
1813 exp->exp_flvr_old[0].sf_rpc,
1814 exp->exp_flvr_old[1].sf_rpc,
Arnd Bergmann986ef132015-09-27 16:45:28 -04001815 (s64)(exp->exp_flvr_expire[0] -
1816 ktime_get_real_seconds()));
Peng Taod7e09d02013-05-02 16:46:55 +08001817 spin_unlock(&exp->exp_lock);
1818 return 0;
1819 }
1820 } else {
1821 CDEBUG(D_SEC, "mark middle expired\n");
1822 exp->exp_flvr_expire[0] = 0;
1823 }
1824 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1825 exp->exp_flvr.sf_rpc,
1826 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1827 req->rq_flvr.sf_rpc);
1828 }
1829
 1830	/* now it doesn't match the current flavor; the only chance to
Oleg Drokindadfcda2016-02-24 22:00:38 -05001831	 * accept it is to match an old flavor which has not expired.
1832 */
Peng Taod7e09d02013-05-02 16:46:55 +08001833 if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
Arnd Bergmann986ef132015-09-27 16:45:28 -04001834 if (exp->exp_flvr_expire[1] >= ktime_get_real_seconds()) {
Peng Taod7e09d02013-05-02 16:46:55 +08001835 if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
Arnd Bergmann986ef132015-09-27 16:45:28 -04001836 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the oldest one (%lld)\n",
Joe Perches2d00bd12014-11-23 11:28:50 -08001837 exp,
Peng Taod7e09d02013-05-02 16:46:55 +08001838 exp->exp_flvr.sf_rpc,
1839 exp->exp_flvr_old[0].sf_rpc,
1840 exp->exp_flvr_old[1].sf_rpc,
Arnd Bergmann986ef132015-09-27 16:45:28 -04001841 (s64)(exp->exp_flvr_expire[1] -
1842 ktime_get_real_seconds()));
Peng Taod7e09d02013-05-02 16:46:55 +08001843 spin_unlock(&exp->exp_lock);
1844 return 0;
1845 }
1846 } else {
1847 CDEBUG(D_SEC, "mark oldest expired\n");
1848 exp->exp_flvr_expire[1] = 0;
1849 }
1850 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1851 exp, exp->exp_flvr.sf_rpc,
1852 exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1853 req->rq_flvr.sf_rpc);
1854 } else {
1855 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1856 exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1857 exp->exp_flvr_old[1].sf_rpc);
1858 }
1859
1860 spin_unlock(&exp->exp_lock);
1861
Arnd Bergmann986ef132015-09-27 16:45:28 -04001862 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+lld)|%x(%+lld)\n",
Peng Taod7e09d02013-05-02 16:46:55 +08001863 exp, exp->exp_obd->obd_name,
1864 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1865 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1866 req->rq_flvr.sf_rpc,
1867 exp->exp_flvr.sf_rpc,
1868 exp->exp_flvr_old[0].sf_rpc,
1869 exp->exp_flvr_expire[0] ?
Arnd Bergmann986ef132015-09-27 16:45:28 -04001870 (s64)(exp->exp_flvr_expire[0] - ktime_get_real_seconds()) : 0,
Peng Taod7e09d02013-05-02 16:46:55 +08001871 exp->exp_flvr_old[1].sf_rpc,
1872 exp->exp_flvr_expire[1] ?
Arnd Bergmann986ef132015-09-27 16:45:28 -04001873 (s64)(exp->exp_flvr_expire[1] - ktime_get_real_seconds()) : 0);
Peng Taod7e09d02013-05-02 16:46:55 +08001874 return -EACCES;
1875}
1876EXPORT_SYMBOL(sptlrpc_target_export_check);
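/*
 * Editor's summary of the scheme above (illustrative): an export keeps a
 * three-slot flavor history -- exp_flvr is current, exp_flvr_old[0] the
 * previous flavor and exp_flvr_old[1] the oldest, each old slot stamped
 * with an exp_flvr_expire[] deadline set EXP_FLVR_UPDATE_EXPIRE seconds
 * after the change that displaced it. An incoming request is accepted if
 * it matches any slot that has not yet expired; otherwise
 * sptlrpc_target_export_check() fails with -EACCES.
 */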
1877
Peng Taod7e09d02013-05-02 16:46:55 +08001878static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1879{
1880 /* peer's claim is unreliable unless gss is being used */
1881 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1882 return svc_rc;
1883
1884 switch (req->rq_sp_from) {
1885 case LUSTRE_SP_CLI:
1886 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
1887 DEBUG_REQ(D_ERROR, req, "faked source CLI");
1888 svc_rc = SECSVC_DROP;
1889 }
1890 break;
1891 case LUSTRE_SP_MDT:
1892 if (!req->rq_auth_usr_mdt) {
1893 DEBUG_REQ(D_ERROR, req, "faked source MDT");
1894 svc_rc = SECSVC_DROP;
1895 }
1896 break;
1897 case LUSTRE_SP_OST:
1898 if (!req->rq_auth_usr_ost) {
1899 DEBUG_REQ(D_ERROR, req, "faked source OST");
1900 svc_rc = SECSVC_DROP;
1901 }
1902 break;
1903 case LUSTRE_SP_MGS:
1904 case LUSTRE_SP_MGC:
1905 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1906 !req->rq_auth_usr_ost) {
1907 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
1908 svc_rc = SECSVC_DROP;
1909 }
1910 break;
1911 case LUSTRE_SP_ANY:
1912 default:
1913 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
1914 svc_rc = SECSVC_DROP;
1915 }
1916
1917 return svc_rc;
1918}
1919
1920/**
 1921 * Used by ptlrpc server, to perform transformation upon the request message
 1922 * of an incoming \a req. This must be the first thing done with an incoming
 1923 * request in the ptlrpc layer.
1924 *
 1925 * \retval SECSVC_OK success, and req->rq_reqmsg points to the request
 1926 * message in clear text, of size req->rq_reqlen; also req->rq_svc_ctx is set.
1927 * \retval SECSVC_COMPLETE success, the request has been fully processed, and
1928 * reply message has been prepared.
1929 * \retval SECSVC_DROP failed, this request should be dropped.
1930 */
1931int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1932{
1933 struct ptlrpc_sec_policy *policy;
Chris Hannad0bfef32015-06-03 10:28:26 -04001934 struct lustre_msg *msg = req->rq_reqbuf;
1935 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001936
1937 LASSERT(msg);
Oleg Drokin8b382082016-02-16 00:46:58 -05001938 LASSERT(!req->rq_reqmsg);
1939 LASSERT(!req->rq_repmsg);
1940 LASSERT(!req->rq_svc_ctx);
Peng Taod7e09d02013-05-02 16:46:55 +08001941
1942 req->rq_req_swab_mask = 0;
1943
1944 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
1945 switch (rc) {
1946 case 1:
1947 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
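		/* fall through - message unpacked ok, header needed swabbing */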
1948 case 0:
1949 break;
1950 default:
Greg Kroah-Hartmanb0f5aad2014-07-12 20:06:04 -07001951 CERROR("error unpacking request from %s x%llu\n",
Peng Taod7e09d02013-05-02 16:46:55 +08001952 libcfs_id2str(req->rq_peer), req->rq_xid);
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08001953 return SECSVC_DROP;
Peng Taod7e09d02013-05-02 16:46:55 +08001954 }
1955
1956 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
1957 req->rq_sp_from = LUSTRE_SP_ANY;
Peng Tao4b1a25f2013-07-15 22:27:14 +08001958 req->rq_auth_uid = -1;
1959 req->rq_auth_mapped_uid = -1;
Peng Taod7e09d02013-05-02 16:46:55 +08001960
1961 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
1962 if (!policy) {
1963 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08001964 return SECSVC_DROP;
Peng Taod7e09d02013-05-02 16:46:55 +08001965 }
1966
1967 LASSERT(policy->sp_sops->accept);
1968 rc = policy->sp_sops->accept(req);
1969 sptlrpc_policy_put(policy);
1970 LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1971 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1972
1973 /*
 1974	 * if it's not the null flavor (which means the real msg is embedded),
Masanari Iida0c2bc752014-02-08 00:30:37 +09001975 * reset the swab mask for the coming inner msg unpacking.
Peng Taod7e09d02013-05-02 16:46:55 +08001976 */
1977 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
1978 req->rq_req_swab_mask = 0;
1979
1980 /* sanity check for the request source */
1981 rc = sptlrpc_svc_check_from(req, rc);
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08001982 return rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001983}
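/*
 * Editor's sketch of the server-side call sequence (illustrative; the
 * request-handling code in between is not part of this file):
 *
 *	rc = sptlrpc_svc_unwrap_request(req);	first thing done to req
 *	if (rc == SECSVC_DROP)
 *		drop the request;
 *	... service the request, choose a reply size msglen ...
 *	rc = sptlrpc_svc_alloc_rs(req, msglen);	sets rq_reply_state
 *	... fill in the reply message ...
 *	rc = sptlrpc_svc_wrap_reply(req);	authorize/transform the reply
 */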
1984
1985/**
 1986 * Used by ptlrpc server, to allocate the reply buffer for \a req. On
 1987 * success, req->rq_reply_state is set, and req->rq_reply_state->rs_msg
 1988 * points to a buffer of \a msglen size.
1989 */
1990int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
1991{
1992 struct ptlrpc_sec_policy *policy;
1993 struct ptlrpc_reply_state *rs;
1994 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08001995
1996 LASSERT(req->rq_svc_ctx);
1997 LASSERT(req->rq_svc_ctx->sc_policy);
1998
1999 policy = req->rq_svc_ctx->sc_policy;
2000 LASSERT(policy->sp_sops->alloc_rs);
2001
2002 rc = policy->sp_sops->alloc_rs(req, msglen);
2003 if (unlikely(rc == -ENOMEM)) {
Patrick Farrella34041a2014-04-27 13:06:25 -04002004 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
Mike Rapoport50ffcb72015-10-13 16:03:40 +03002005
Patrick Farrella34041a2014-04-27 13:06:25 -04002006 if (svcpt->scp_service->srv_max_reply_size <
2007 msglen + sizeof(struct ptlrpc_reply_state)) {
2008 /* Just return failure if the size is too big */
James Nunez19b20562016-03-07 18:10:21 -05002009 CERROR("size of message is too big (%zd), %d allowed\n",
Oleg Drokin30c0aa32016-02-26 01:50:02 -05002010 msglen + sizeof(struct ptlrpc_reply_state),
2011 svcpt->scp_service->srv_max_reply_size);
Patrick Farrella34041a2014-04-27 13:06:25 -04002012 return -ENOMEM;
2013 }
2014
Peng Taod7e09d02013-05-02 16:46:55 +08002015 /* failed alloc, try emergency pool */
Patrick Farrella34041a2014-04-27 13:06:25 -04002016 rs = lustre_get_emerg_rs(svcpt);
Oleg Drokin8b382082016-02-16 00:46:58 -05002017 if (!rs)
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08002018 return -ENOMEM;
Peng Taod7e09d02013-05-02 16:46:55 +08002019
2020 req->rq_reply_state = rs;
2021 rc = policy->sp_sops->alloc_rs(req, msglen);
2022 if (rc) {
2023 lustre_put_emerg_rs(rs);
2024 req->rq_reply_state = NULL;
2025 }
2026 }
2027
2028 LASSERT(rc != 0 ||
2029 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2030
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08002031 return rc;
Peng Taod7e09d02013-05-02 16:46:55 +08002032}
2033
2034/**
 2035 * Used by ptlrpc server, to perform transformation upon the reply message.
 2036 *
Masanari Iida0c2bc752014-02-08 00:30:37 +09002037 * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
Peng Taod7e09d02013-05-02 16:46:55 +08002038 * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2039 */
2040int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2041{
2042 struct ptlrpc_sec_policy *policy;
2043 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08002044
2045 LASSERT(req->rq_svc_ctx);
2046 LASSERT(req->rq_svc_ctx->sc_policy);
2047
2048 policy = req->rq_svc_ctx->sc_policy;
2049 LASSERT(policy->sp_sops->authorize);
2050
2051 rc = policy->sp_sops->authorize(req);
2052 LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2053
Greg Kroah-Hartman0a3bdb02013-08-03 10:35:28 +08002054 return rc;
Peng Taod7e09d02013-05-02 16:46:55 +08002055}
2056
2057/**
2058 * Used by ptlrpc server, to free reply_state.
2059 */
2060void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2061{
2062 struct ptlrpc_sec_policy *policy;
2063 unsigned int prealloc;
Peng Taod7e09d02013-05-02 16:46:55 +08002064
2065 LASSERT(rs->rs_svc_ctx);
2066 LASSERT(rs->rs_svc_ctx->sc_policy);
2067
2068 policy = rs->rs_svc_ctx->sc_policy;
2069 LASSERT(policy->sp_sops->free_rs);
2070
2071 prealloc = rs->rs_prealloc;
2072 policy->sp_sops->free_rs(rs);
2073
2074 if (prealloc)
2075 lustre_put_emerg_rs(rs);
Peng Taod7e09d02013-05-02 16:46:55 +08002076}
2077
2078void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2079{
2080 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2081
Oleg Drokin8b382082016-02-16 00:46:58 -05002082 if (ctx)
Peng Taod7e09d02013-05-02 16:46:55 +08002083 atomic_inc(&ctx->sc_refcount);
2084}
2085
2086void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2087{
2088 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2089
Oleg Drokin8b382082016-02-16 00:46:58 -05002090 if (!ctx)
Peng Taod7e09d02013-05-02 16:46:55 +08002091 return;
2092
2093 LASSERT_ATOMIC_POS(&ctx->sc_refcount);
2094 if (atomic_dec_and_test(&ctx->sc_refcount)) {
2095 if (ctx->sc_policy->sp_sops->free_ctx)
2096 ctx->sc_policy->sp_sops->free_ctx(ctx);
2097 }
2098 req->rq_svc_ctx = NULL;
2099}
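/*
 * Editor's note (illustrative): sptlrpc_svc_ctx_addref() and
 * sptlrpc_svc_ctx_decref() are expected to be paired over a request's
 * lifetime; the final decref hands the context back to its policy via
 * sp_sops->free_ctx and always clears req->rq_svc_ctx.
 */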
2100
Peng Taod7e09d02013-05-02 16:46:55 +08002101/****************************************
2102 * bulk security *
2103 ****************************************/
2104
2105/**
 2106 * Perform transformation upon the bulk data pointed to by \a desc. This is
 2107 * called before transforming the request message.
2108 */
2109int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2110 struct ptlrpc_bulk_desc *desc)
2111{
2112 struct ptlrpc_cli_ctx *ctx;
2113
2114 LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2115
2116 if (!req->rq_pack_bulk)
2117 return 0;
2118
2119 ctx = req->rq_cli_ctx;
2120 if (ctx->cc_ops->wrap_bulk)
2121 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2122 return 0;
2123}
2124EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2125
2126/**
 2127 * This is called after unwrapping the reply message.
 2128 * Returns the number of bytes of plain text actually received, or an error code.
2129 */
2130int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2131 struct ptlrpc_bulk_desc *desc,
2132 int nob)
2133{
Chris Hannad0bfef32015-06-03 10:28:26 -04002134 struct ptlrpc_cli_ctx *ctx;
2135 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08002136
2137 LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2138
2139 if (!req->rq_pack_bulk)
2140 return desc->bd_nob_transferred;
2141
2142 ctx = req->rq_cli_ctx;
2143 if (ctx->cc_ops->unwrap_bulk) {
2144 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2145 if (rc < 0)
2146 return rc;
2147 }
2148 return desc->bd_nob_transferred;
2149}
2150EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
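/*
 * Usage sketch for a client bulk read (editor's illustration; error
 * handling and the surrounding I/O path are elided and hypothetical):
 *
 *	rc = sptlrpc_cli_wrap_bulk(req, desc);	before the request is sent
 *	... send request, receive reply and bulk data ...
 *	nob = sptlrpc_cli_unwrap_bulk_read(req, desc, nob);
 *
 * A negative nob is an error; otherwise it is the plain-text size received.
 */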
2151
2152/**
 2153 * This is called after unwrapping the reply message.
 2154 * Returns 0 on success, or an error code.
2155 */
2156int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2157 struct ptlrpc_bulk_desc *desc)
2158{
Chris Hannad0bfef32015-06-03 10:28:26 -04002159 struct ptlrpc_cli_ctx *ctx;
2160 int rc;
Peng Taod7e09d02013-05-02 16:46:55 +08002161
2162 LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2163
2164 if (!req->rq_pack_bulk)
2165 return 0;
2166
2167 ctx = req->rq_cli_ctx;
2168 if (ctx->cc_ops->unwrap_bulk) {
2169 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2170 if (rc < 0)
2171 return rc;
2172 }
2173
2174 /*
 2175	 * if everything went right, bd_nob should equal bd_nob_transferred.
 2176	 * in privacy mode, bd_nob_transferred needs to be adjusted.
2177 */
2178 if (desc->bd_nob != desc->bd_nob_transferred) {
James Nunez19b20562016-03-07 18:10:21 -05002179 CERROR("nob %d doesn't match transferred nob %d\n",
Peng Taod7e09d02013-05-02 16:46:55 +08002180 desc->bd_nob, desc->bd_nob_transferred);
2181 return -EPROTO;
2182 }
2183
2184 return 0;
2185}
2186EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2187
Peng Taod7e09d02013-05-02 16:46:55 +08002188/****************************************
2189 * user descriptor helpers *
2190 ****************************************/
2191
2192int sptlrpc_current_user_desc_size(void)
2193{
2194 int ngroups;
2195
2196 ngroups = current_ngroups;
2197
2198 if (ngroups > LUSTRE_MAX_GROUPS)
2199 ngroups = LUSTRE_MAX_GROUPS;
2200 return sptlrpc_user_desc_size(ngroups);
2201}
2202EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
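/*
 * Editor's note (inferred from the packing arithmetic below, where the
 * group count is recovered as (buflen - sizeof(*pud)) / 4): the descriptor
 * is a fixed struct ptlrpc_user_desc header followed by one __u32 per
 * supplementary group, with the count capped at LUSTRE_MAX_GROUPS.
 */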
2203
2204int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2205{
2206 struct ptlrpc_user_desc *pud;
2207
2208 pud = lustre_msg_buf(msg, offset, 0);
2209
Lidza Louinadb3c16c2016-05-16 14:51:42 -04002210 if (!pud)
2211 return -EINVAL;
2212
Peng Tao4b1a25f2013-07-15 22:27:14 +08002213 pud->pud_uid = from_kuid(&init_user_ns, current_uid());
2214 pud->pud_gid = from_kgid(&init_user_ns, current_gid());
2215 pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
2216 pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
Peng Taod7e09d02013-05-02 16:46:55 +08002217 pud->pud_cap = cfs_curproc_cap_pack();
2218 pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2219
2220 task_lock(current);
2221 if (pud->pud_ngroups > current_ngroups)
2222 pud->pud_ngroups = current_ngroups;
2223 memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2224 pud->pud_ngroups * sizeof(__u32));
2225 task_unlock(current);
2226
2227 return 0;
2228}
2229EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2230
2231int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2232{
2233 struct ptlrpc_user_desc *pud;
Chris Hannad0bfef32015-06-03 10:28:26 -04002234 int i;
Peng Taod7e09d02013-05-02 16:46:55 +08002235
2236 pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2237 if (!pud)
2238 return -EINVAL;
2239
2240 if (swabbed) {
2241 __swab32s(&pud->pud_uid);
2242 __swab32s(&pud->pud_gid);
2243 __swab32s(&pud->pud_fsuid);
2244 __swab32s(&pud->pud_fsgid);
2245 __swab32s(&pud->pud_cap);
2246 __swab32s(&pud->pud_ngroups);
2247 }
2248
2249 if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2250 CERROR("%u groups is too large\n", pud->pud_ngroups);
2251 return -EINVAL;
2252 }
2253
2254 if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2255 msg->lm_buflens[offset]) {
2256 CERROR("%u groups are claimed but bufsize only %u\n",
2257 pud->pud_ngroups, msg->lm_buflens[offset]);
2258 return -EINVAL;
2259 }
2260
2261 if (swabbed) {
2262 for (i = 0; i < pud->pud_ngroups; i++)
2263 __swab32s(&pud->pud_groups[i]);
2264 }
2265
2266 return 0;
2267}
2268EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
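/*
 * Round-trip sketch (editor's illustration; "offset" is whichever message
 * segment the caller reserved, sized via sptlrpc_current_user_desc_size()):
 *
 *	rc = sptlrpc_pack_user_desc(msg, offset);		sender side
 *	...
 *	rc = sptlrpc_unpack_user_desc(msg, offset, swabbed);	receiver side
 *
 * The unpack side byte-swaps the fields if needed and validates
 * pud_ngroups against both LUSTRE_MAX_GROUPS and the segment length.
 */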
2269
2270/****************************************
2271 * misc helpers *
2272 ****************************************/
2273
Greg Donaldaff9d8e2014-08-21 11:07:42 -05002274const char *sec2target_str(struct ptlrpc_sec *sec)
Peng Taod7e09d02013-05-02 16:46:55 +08002275{
2276 if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2277 return "*";
2278 if (sec_is_reverse(sec))
2279 return "c";
2280 return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2281}
2282EXPORT_SYMBOL(sec2target_str);
2283
2284/*
2285 * return true if the bulk data is protected
2286 */
Geliang Tang316bd5e2015-10-18 22:35:31 +08002287bool sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
Peng Taod7e09d02013-05-02 16:46:55 +08002288{
2289 switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2290 case SPTLRPC_BULK_SVC_INTG:
2291 case SPTLRPC_BULK_SVC_PRIV:
Geliang Tang316bd5e2015-10-18 22:35:31 +08002292 return true;
Peng Taod7e09d02013-05-02 16:46:55 +08002293 default:
Geliang Tang316bd5e2015-10-18 22:35:31 +08002294 return false;
Peng Taod7e09d02013-05-02 16:46:55 +08002295 }
2296}
2297EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
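/*
 * Editor's example (illustrative): the bulk-service part of the wire flavor
 * decides the answer, so a caller can gate bulk-protection work like:
 *
 *	if (sptlrpc_flavor_has_bulk(&req->rq_flvr))
 *		... integrity checksums or encryption cover the bulk pages ...
 */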
2298
2299/****************************************
 2300 * crypto API helper/alloc blkcipher *
2301 ****************************************/
2302
2303/****************************************
2304 * initialize/finalize *
2305 ****************************************/
2306
2307int sptlrpc_init(void)
2308{
2309 int rc;
2310
2311 rwlock_init(&policy_lock);
2312
2313 rc = sptlrpc_gc_init();
2314 if (rc)
2315 goto out;
2316
2317 rc = sptlrpc_conf_init();
2318 if (rc)
2319 goto out_gc;
2320
2321 rc = sptlrpc_enc_pool_init();
2322 if (rc)
2323 goto out_conf;
2324
2325 rc = sptlrpc_null_init();
2326 if (rc)
2327 goto out_pool;
2328
2329 rc = sptlrpc_plain_init();
2330 if (rc)
2331 goto out_null;
2332
2333 rc = sptlrpc_lproc_init();
2334 if (rc)
2335 goto out_plain;
2336
2337 return 0;
2338
2339out_plain:
2340 sptlrpc_plain_fini();
2341out_null:
2342 sptlrpc_null_fini();
2343out_pool:
2344 sptlrpc_enc_pool_fini();
2345out_conf:
2346 sptlrpc_conf_fini();
2347out_gc:
2348 sptlrpc_gc_fini();
2349out:
2350 return rc;
2351}
2352
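/*
 * Editor's note: sptlrpc_init() unwinds in reverse registration order on
 * failure (the out_* labels above), and sptlrpc_fini() below tears the
 * subsystems down in exactly the same reverse order.
 */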
2353void sptlrpc_fini(void)
2354{
2355 sptlrpc_lproc_fini();
2356 sptlrpc_plain_fini();
2357 sptlrpc_null_fini();
2358 sptlrpc_enc_pool_fini();
2359 sptlrpc_conf_fini();
2360 sptlrpc_gc_fini();
2361}