/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/** \defgroup PtlRPC Portal RPC and networking module.
 *
 * PortalRPC is the layer used by the rest of the Lustre code to achieve
 * network communications: establish connections with corresponding export
 * and import states, listen for a service, send and receive RPCs.
 * PortalRPC also includes the base recovery framework: packet resending
 * and replaying, reconnections, and the pinger.
 *
 * PortalRPC utilizes LNet as its transport layer.
 *
 * @{
 */

#ifndef _LUSTRE_NET_H
#define _LUSTRE_NET_H

/** \defgroup net net
 *
 * @{
 */

#include "../../include/linux/libcfs/libcfs.h"
#include "../../include/linux/lnet/nidstr.h"
#include "../../include/linux/lnet/api.h"
#include "lustre/lustre_idl.h"
#include "lustre_ha.h"
#include "lustre_sec.h"
#include "lustre_import.h"
#include "lprocfs_status.h"
#include "lu_object.h"
#include "lustre_req_layout.h"

#include "obd_support.h"
#include "lustre_ver.h"

/* MD flags we _always_ use */
#define PTLRPC_MD_OPTIONS 0

/**
 * Max # of bulk operations in one request.
 * In order for the client and server to properly negotiate the maximum
 * possible transfer size, PTLRPC_BULK_OPS_COUNT must be a power-of-two
 * value. The client is free to limit the actual RPC size for any bulk
 * transfer via cl_max_pages_per_rpc to some non-power-of-two value.
 */
#define PTLRPC_BULK_OPS_BITS	2
#define PTLRPC_BULK_OPS_COUNT	(1U << PTLRPC_BULK_OPS_BITS)
/**
 * PTLRPC_BULK_OPS_MASK is for the convenience of the client only, and
 * should not be used on the server at all. Otherwise, it imposes a
 * protocol limitation on the maximum RPC size that can be used by any
 * RPC sent to that server in the future. Instead, the server should
 * use the negotiated per-client ocd_brw_size to determine the bulk
 * RPC count.
 */
#define PTLRPC_BULK_OPS_MASK	(~((__u64)PTLRPC_BULK_OPS_COUNT - 1))

/**
 * Define maxima for bulk I/O.
 *
 * A single PTLRPC BRW request is sent via up to PTLRPC_BULK_OPS_COUNT
 * of LNET_MTU sized RDMA transfers. Clients and servers negotiate the
 * currently supported maximum between peers at connect via ocd_brw_size.
 */
#define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
#define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT)

#define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_SHIFT)
#define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_SHIFT)
#define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)

/* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
# if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
# error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
# endif
# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE))
# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE"
# endif
# if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_SIZE too big"
# endif
# if (PTLRPC_MAX_BRW_PAGES > LNET_MAX_IOV * PTLRPC_BULK_OPS_COUNT)
# error "PTLRPC_MAX_BRW_PAGES too big"
# endif
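
/*
 * Worked example (illustrative only, assuming the common LNET_MTU_BITS
 * of 20, i.e. a 1 MiB LNet MTU, and a 4 KiB PAGE_SIZE, i.e. PAGE_SHIFT
 * of 12):
 *
 *	PTLRPC_MAX_BRW_BITS  = 20 + 2 = 22
 *	PTLRPC_MAX_BRW_SIZE  = 1 << 22 = 4 MiB
 *	PTLRPC_MAX_BRW_PAGES = 4 MiB >> 12 = 1024 pages
 *
 * i.e. one BRW RPC can carry up to PTLRPC_BULK_OPS_COUNT (4) LNET_MTU
 * sized transfers, and the cpp checks above verify this arithmetic for
 * the PAGE_SIZE actually in use.
 */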

#define PTLRPC_NTHRS_INIT	2

/**
 * Buffer Constants
 *
 * Constants determine how memory is used to buffer incoming service requests.
 *
 * ?_NBUFS		# buffers to allocate when growing the pool
 * ?_BUFSIZE		# bytes in a single request buffer
 * ?_MAXREQSIZE	# maximum request service will receive
 *
 * When fewer than ?_NBUFS/2 buffers are posted for receive, another chunk
 * of ?_NBUFS is added to the pool.
 *
 * Messages larger than ?_MAXREQSIZE are dropped. Request buffers are
 * considered full when less than ?_MAXREQSIZE is left in them.
 */
/**
 * Thread Constants
 *
 * Constants determine how threads are created for a ptlrpc service.
 *
 * ?_NTHRS_INIT	# threads to create for each service partition at
 *			  initialization. If it is a non-affinity service and
 *			  there is only one partition, it is the overall #
 *			  threads for the service while initializing.
 * ?_NTHRS_BASE	# threads that should be created at least for each
 *			  ptlrpc partition to keep the service healthy.
 *			  It is the low-water mark of the threads upper-limit
 *			  for each partition.
 * ?_THR_FACTOR	# threads that can be added to the threads upper-limit
 *			  per CPU core. This factor is only for reference;
 *			  we might decrease the value of the factor if the
 *			  number of cores per CPT is above a limit.
 * ?_NTHRS_MAX	# overall threads that can be created for a service.
 *			  It is a soft limit because if the service is running
 *			  on a machine with hundreds of cores and tens of
 *			  CPU partitions, we need to guarantee each partition
 *			  has ?_NTHRS_BASE threads, which means the total
 *			  thread count will be ?_NTHRS_BASE * number_of_cpts,
 *			  which can exceed ?_NTHRS_MAX.
 *
 * Examples
 *
 * #define MDS_NTHRS_INIT	2
 * #define MDS_NTHRS_BASE	64
 * #define MDS_NTHRS_FACTOR	8
 * #define MDS_NTHRS_MAX	1024
 *
 * Example 1):
 * ---------------------------------------------------------------------
 * Server(A) has 16 cores, the user configured it to 4 partitions, so each
 * partition has 4 cores; then the actual number of service threads on each
 * partition is:
 *	MDS_NTHRS_BASE(64) + cores(4) * MDS_NTHRS_FACTOR(8) = 96
 *
 * The total number of threads for the service is:
 *	96 * partitions(4) = 384
 *
 * Example 2):
 * ---------------------------------------------------------------------
 * Server(B) has 32 cores, the user configured it to 4 partitions, so each
 * partition has 8 cores; then the actual number of service threads on each
 * partition is:
 *	MDS_NTHRS_BASE(64) + cores(8) * MDS_NTHRS_FACTOR(8) = 128
 *
 * The total number of threads for the service is:
 *	128 * partitions(4) = 512
 *
 * Example 3):
 * ---------------------------------------------------------------------
 * Server(C) has 96 cores, the user configured it to 8 partitions, so each
 * partition has 12 cores; then the actual number of service threads on each
 * partition is:
 *	MDS_NTHRS_BASE(64) + cores(12) * MDS_NTHRS_FACTOR(8) = 160
 *
 * The total number of threads for the service is:
 *	160 * partitions(8) = 1280
 *
 * However, this is above the soft limit MDS_NTHRS_MAX, so we choose this
 * number as the upper limit of threads for each partition:
 *	MDS_NTHRS_MAX(1024) / partitions(8) = 128
 *
 * Example 4):
 * ---------------------------------------------------------------------
 * Server(D) has a thousand cores and the user configured it to 32 partitions:
 *	MDS_NTHRS_BASE(64) * 32 = 2048
 *
 * which is already above the soft limit MDS_NTHRS_MAX(1024), but we still
 * need to guarantee that each partition has at least MDS_NTHRS_BASE(64)
 * threads to keep the service healthy, so the total number of threads will
 * just be 2048.
 *
 * NB: we don't suggest choosing a server with that many cores because the
 *     backend filesystem itself, the buffer cache, or the underlying network
 *     stack might have SMP scalability issues at that large a scale.
 *
 *     If the user already has a fat machine with hundreds or thousands of
 *     cores, there are two choices for configuration:
 *	a) create a CPU table from a subset of all CPUs and run Lustre on
 *	   top of this subset
 *	b) bind service threads to a few partitions; see the module
 *	   parameters of MDS and OSS for details
 *
 * NB: these calculations (and the examples above) are simplified to help
 *     understanding; the real implementation is a little more complex,
 *     please see ptlrpc_server_nthreads_check() for details.
 *
 */

 /*
  * LDLM threads constants:
  *
  * Given 8 as factor and 24 as base threads number
  *
  * example 1)
  * On 4-core machine we will have 24 + 8 * 4 = 56 threads.
  *
  * example 2)
  * On 8-core machine with 2 partitions (4 cores each) we will have
  * 24 + 4 * 8 = 56 threads for each partition, and the total thread
  * count will be 112.
  *
  * example 3)
  * On 64-core machine with 8 partitions we will need LDLM_NTHRS_BASE(24)
  * threads for each partition to keep the service healthy, so the total
  * thread count should be 24 * 8 = 192.
  *
  * So with these constants, the thread count will be at a similar level
  * to old versions, unless the target machine has over a hundred cores.
  */
#define LDLM_THR_FACTOR		8
#define LDLM_NTHRS_INIT		PTLRPC_NTHRS_INIT
#define LDLM_NTHRS_BASE		24
#define LDLM_NTHRS_MAX		(num_online_cpus() == 1 ? 64 : 128)

#define LDLM_BL_THREADS	LDLM_NTHRS_AUTO_INIT
#define LDLM_CLIENT_NBUFS	1
#define LDLM_SERVER_NBUFS	64
#define LDLM_BUFSIZE		(8 * 1024)
#define LDLM_MAXREQSIZE	(5 * 1024)
#define LDLM_MAXREPSIZE	(1024)
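
/*
 * Worked example (illustrative only): following the buffer-constant rules
 * above, the LDLM service initially posts LDLM_SERVER_NBUFS (64) receive
 * buffers of LDLM_BUFSIZE (8 KiB) each; whenever fewer than 32 (NBUFS/2)
 * buffers remain posted, another chunk of 64 is allocated. A buffer is
 * treated as full once less than LDLM_MAXREQSIZE (5 KiB) is left in it,
 * and any incoming message larger than LDLM_MAXREQSIZE is dropped.
 */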

#define MDS_MAXREQSIZE	(5 * 1024)	/* >= 4736 */

#define OST_MAXREQSIZE	(5 * 1024)

/* Macro to hide a typecast. */
#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args)

struct ptlrpc_replay_async_args {
	int praa_old_state;
	int praa_old_status;
};

/**
 * Structure to define a single portal connection.
 */
struct ptlrpc_connection {
	/** linkage for connections hash table */
	struct hlist_node	c_hash;
	/** Our own lnet nid for this connection */
	lnet_nid_t		c_self;
	/** Remote side nid for this connection */
	lnet_process_id_t	c_peer;
	/** UUID of the other side */
	struct obd_uuid		c_remote_uuid;
	/** reference counter for this connection */
	atomic_t		c_refcount;
};

/** Client definition for PortalRPC */
struct ptlrpc_client {
	/** What lnet portal does this client send messages to by default */
	__u32			cli_request_portal;
	/** What portal do we expect replies on */
	__u32			cli_reply_portal;
	/** Name of the client */
	char			*cli_name;
};

/** state flags of requests */
/* XXX only ones left are those used by the bulk descs as well! */
#define PTL_RPC_FL_INTR		(1 << 0)  /* reply wait was interrupted by user */
#define PTL_RPC_FL_TIMEOUT	(1 << 7)  /* request timed out waiting for reply */

#define REQ_MAX_ACK_LOCKS 8

union ptlrpc_async_args {
	/**
	 * Scratchpad for passing args to completion interpreter. Users
	 * cast to the struct of their choosing, and CLASSERT that this is
	 * big enough. For _tons_ of context, kmalloc a struct and store
	 * a pointer to it here. The pointer_arg ensures this struct is at
	 * least big enough for that.
	 */
	void	*pointer_arg[11];
	__u64	space[7];
};
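
/*
 * Illustrative usage sketch (not part of this header; struct and field
 * names below are hypothetical): a caller with a reply handler typically
 * overlays its own args struct on rq_async_args via the macro above,
 * asserting at compile time that it fits:
 *
 *	struct my_async_args {
 *		struct obd_import *aa_imp;
 *		int		   aa_flags;
 *	};
 *
 *	CLASSERT(sizeof(struct my_async_args) <=
 *		 sizeof(union ptlrpc_async_args));
 *	struct my_async_args *aa = ptlrpc_req_async_args(req);
 */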

struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);

/**
 * Definition of request set structure.
 * A request set is a list of requests (not necessarily to the same target)
 * that once populated with RPCs could be sent in parallel.
 * There are two kinds of request sets: general purpose, and with a dedicated
 * serving thread. An example of the latter is the ptlrpcd set.
 * For general purpose sets, once the set has started sending it is impossible
 * to add new requests to it.
 * Provides a way to call "completion callbacks" when all requests in the set
 * returned.
 */
struct ptlrpc_request_set {
	atomic_t		set_refcount;
	/** number of in queue requests */
	atomic_t		set_new_count;
	/** number of uncompleted requests */
	atomic_t		set_remaining;
	/** wait queue to wait on for request events */
	wait_queue_head_t	set_waitq;
	wait_queue_head_t      *set_wakeup_ptr;
	/** List of requests in the set */
	struct list_head	set_requests;
	/**
	 * List of completion callbacks to be called when the set is completed
	 * This is only used if \a set_interpret is NULL.
	 * Links struct ptlrpc_set_cbdata.
	 */
	struct list_head	set_cblist;
	/** Completion callback, if only one. */
	set_interpreter_func	set_interpret;
	/** opaque argument passed to completion \a set_interpret callback. */
	void		       *set_arg;
	/**
	 * Lock for \a set_new_requests manipulations
	 * locked so that any old caller can communicate requests to
	 * the set holder who can then fold them into the lock-free set
	 */
	spinlock_t		set_new_req_lock;
	/** List of new yet unsent requests. Only used with ptlrpcd now. */
	struct list_head	set_new_requests;

	/** rq_status of requests that have been freed already */
	int			set_rc;
	/** Additional fields used by the flow control extension */
	/** Maximum number of RPCs in flight */
	int			set_max_inflight;
	/** Callback function used to generate RPCs */
	set_producer_func	set_producer;
	/** opaque argument passed to the producer callback */
	void		       *set_producer_arg;
};
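
/*
 * Illustrative usage sketch (assuming the usual ptlrpc set helpers
 * declared elsewhere in ptlrpc, e.g. ptlrpc_prep_set(),
 * ptlrpc_set_add_req(), ptlrpc_set_wait() and ptlrpc_set_destroy());
 * error handling omitted:
 *
 *	struct ptlrpc_request_set *set = ptlrpc_prep_set();
 *
 *	ptlrpc_set_add_req(set, req1);
 *	ptlrpc_set_add_req(set, req2);
 *	rc = ptlrpc_set_wait(set);	// sends in parallel, waits for all
 *	ptlrpc_set_destroy(set);
 */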

/**
 * Description of a single ptlrpc_set callback
 */
struct ptlrpc_set_cbdata {
	/** List linkage item */
	struct list_head	psc_item;
	/** Pointer to interpreting function */
	set_interpreter_func	psc_interpret;
	/** Opaque argument to pass to the callback */
	void		       *psc_data;
};

struct ptlrpc_bulk_desc;
struct ptlrpc_service_part;
struct ptlrpc_service;

/**
 * ptlrpc callback & work item stuff
 */
struct ptlrpc_cb_id {
	void (*cbid_fn)(lnet_event_t *ev);	/* specific callback fn */
	void *cbid_arg;				/* additional arg */
};

/** Maximum number of locks to fit into reply state */
#define RS_MAX_LOCKS 8
#define RS_DEBUG     0

/**
 * Structure to define reply state on the server
 * Reply state holds various reply message information. Also for "difficult"
 * replies (rep-ack case) we store the state after sending reply and wait
 * for the client to acknowledge the reception. In these cases locks could be
 * added to the state for replay/failover consistency guarantees.
 */
struct ptlrpc_reply_state {
	/** Callback description */
	struct ptlrpc_cb_id	rs_cb_id;
	/** Linkage for list of all reply states in a system */
	struct list_head	rs_list;
	/** Linkage for list of all reply states on same export */
	struct list_head	rs_exp_list;
	/** Linkage for list of all reply states for same obd */
	struct list_head	rs_obd_list;
#if RS_DEBUG
	struct list_head	rs_debug_list;
#endif
	/** A spinlock to protect the reply state flags */
	spinlock_t		rs_lock;
	/** Reply state flags */
	unsigned long		rs_difficult:1; /* ACK/commit stuff */
	unsigned long		rs_no_ack:1;	/* no ACK, even for
						 * difficult requests
						 */
	unsigned long		rs_scheduled:1;	    /* being handled? */
	unsigned long		rs_scheduled_ever:1;/* any schedule attempts? */
	unsigned long		rs_handled:1;	/* been handled yet? */
	unsigned long		rs_on_net:1;	/* reply_out_callback pending? */
	unsigned long		rs_prealloc:1;	/* rs from prealloc list */
	unsigned long		rs_committed:1;	/* the transaction was committed
						 * and the rs was dispatched
						 */
	/** Size of the state */
	int			rs_size;
	/** opcode */
	__u32			rs_opc;
	/** Transaction number */
	__u64			rs_transno;
	/** xid */
	__u64			rs_xid;
	struct obd_export      *rs_export;
	struct ptlrpc_service_part *rs_svcpt;
	/** Lnet metadata handle for the reply */
	lnet_handle_md_t	rs_md_h;
	atomic_t		rs_refcount;

	/** Context for the service thread */
	struct ptlrpc_svc_ctx *rs_svc_ctx;
	/** Reply buffer (actually sent to the client), encoded if needed */
	struct lustre_msg     *rs_repbuf;	/* wrapper */
	/** Size of the reply buffer */
	int		       rs_repbuf_len;	/* wrapper buf length */
	/** Size of the reply message */
	int		       rs_repdata_len;	/* wrapper msg length */
	/**
	 * Actual reply message. Its content is encrypted (if needed) to
	 * produce reply buffer for actual sending. In simple case
	 * of no network encryption we just set \a rs_repbuf to \a rs_msg
	 */
	struct lustre_msg     *rs_msg;		/* reply message */

	/** Number of locks awaiting client ACK */
	int		       rs_nlocks;
	/** Handles of locks awaiting client reply ACK */
	struct lustre_handle   rs_locks[RS_MAX_LOCKS];
	/** Lock modes of locks in \a rs_locks */
	enum ldlm_mode	       rs_modes[RS_MAX_LOCKS];
};

struct ptlrpc_thread;

/** RPC stages */
enum rq_phase {
	RQ_PHASE_NEW		= 0xebc0de00,
	RQ_PHASE_RPC		= 0xebc0de01,
	RQ_PHASE_BULK		= 0xebc0de02,
	RQ_PHASE_INTERPRET	= 0xebc0de03,
	RQ_PHASE_COMPLETE	= 0xebc0de04,
	RQ_PHASE_UNREG_RPC	= 0xebc0de05,
	RQ_PHASE_UNREG_BULK	= 0xebc0de06,
	RQ_PHASE_UNDEFINED	= 0xebc0de07
};
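
/*
 * Rough life-cycle sketch (informational, simplified): a client request
 * normally moves NEW -> RPC while in flight, through BULK if a bulk
 * transfer is attached, then INTERPRET once the reply interpreter runs,
 * and finally COMPLETE. The UNREG_RPC/UNREG_BULK phases cover waiting
 * for LNet to unlink request/reply and bulk buffers before the request
 * can move on.
 */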

/** Type of request interpreter call-back */
typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
				    struct ptlrpc_request *req,
				    void *arg, int rc);

/**
 * Definition of request pool structure.
 * The pool is used to store empty preallocated requests for the case
 * when we would actually need to send something without performing
 * any allocations (to avoid e.g. OOM).
 */
struct ptlrpc_request_pool {
	/** Locks the list */
	spinlock_t prp_lock;
	/** list of ptlrpc_request structs */
	struct list_head prp_req_list;
	/** Maximum message size that would fit into a request from this pool */
	int prp_rq_size;
	/** Function to allocate more requests for this pool */
	int (*prp_populate)(struct ptlrpc_request_pool *, int);
};

struct lu_context;
struct lu_env;

struct ldlm_lock;

/**
 * \defgroup nrs Network Request Scheduler
 * @{
 */
struct ptlrpc_nrs_policy;
struct ptlrpc_nrs_resource;
struct ptlrpc_nrs_request;

/**
 * NRS control operations.
 *
 * These are common for all policies.
 */
enum ptlrpc_nrs_ctl {
	/**
	 * Not a valid opcode.
	 */
	PTLRPC_NRS_CTL_INVALID,
	/**
	 * Activate the policy.
	 */
	PTLRPC_NRS_CTL_START,
	/**
	 * Reserved for multiple primary policies, which may be a possibility
	 * in the future.
	 */
	PTLRPC_NRS_CTL_STOP,
	/**
	 * Policies can start using opcodes from this value and onwards for
	 * their own purposes; the assigned value itself is arbitrary.
	 */
	PTLRPC_NRS_CTL_1ST_POL_SPEC = 0x20,
};

/**
 * ORR policy operations
 */
enum nrs_ctl_orr {
	NRS_CTL_ORR_RD_QUANTUM = PTLRPC_NRS_CTL_1ST_POL_SPEC,
	NRS_CTL_ORR_WR_QUANTUM,
	NRS_CTL_ORR_RD_OFF_TYPE,
	NRS_CTL_ORR_WR_OFF_TYPE,
	NRS_CTL_ORR_RD_SUPP_REQ,
	NRS_CTL_ORR_WR_SUPP_REQ,
};

/**
 * NRS policy operations.
 *
 * These determine the behaviour of a policy, and are called in response to
 * NRS core events.
 */
struct ptlrpc_nrs_pol_ops {
	/**
	 * Called during policy registration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being initialized
	 */
	int	(*op_policy_init)(struct ptlrpc_nrs_policy *policy);
	/**
	 * Called during policy unregistration; this operation is optional.
	 *
	 * \param[in,out] policy The policy being unregistered/finalized
	 */
	void	(*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when activating a policy via lprocfs; policies allocate and
	 * initialize their resources here; this operation is optional.
	 *
	 * \param[in,out] policy The policy being started
	 *
	 * \see nrs_policy_start_locked()
	 */
	int	(*op_policy_start)(struct ptlrpc_nrs_policy *policy);
	/**
	 * Called when deactivating a policy via lprocfs; policies deallocate
	 * their resources here; this operation is optional.
	 *
	 * \param[in,out] policy The policy being stopped
	 *
	 * \see nrs_policy_stop0()
	 */
	void	(*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
	/**
	 * Used for policy-specific operations; i.e. not generic ones like
	 * \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
	 * to an ioctl; this operation is optional.
	 *
	 * \param[in,out] policy	The policy carrying out operation \a opc
	 * \param[in]	  opc		The command operation being carried out
	 * \param[in,out] arg		A generic buffer for communication between
	 *				the user and the control operation
	 *
	 * \retval -ve error
	 * \retval   0 success
	 *
	 * \see ptlrpc_nrs_policy_control()
	 */
	int	(*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
				 enum ptlrpc_nrs_ctl opc, void *arg);

	/**
	 * Called when obtaining references to the resources of the resource
	 * hierarchy for a request that has arrived for handling at the PTLRPC
	 * service. Policies should return -ve for requests they do not wish
	 * to handle. This operation is mandatory.
	 *
	 * \param[in,out] policy	The policy we're getting resources for.
	 * \param[in,out] nrq		The request we are getting resources for.
	 * \param[in]	  parent	The parent resource of the resource being
	 *				requested; set to NULL if none.
	 * \param[out]	  resp		The resource is to be returned here; the
	 *				fallback policy in an NRS head should
	 *				\e always return a non-NULL pointer value.
	 * \param[in]	  moving_req	When set, signifies that this is an
	 *				attempt to obtain resources for a request
	 *				being moved to the high-priority NRS head
	 *				by ldlm_lock_reorder_req().
	 *				This implies two things:
	 *				1. We are under obd_export::exp_rpc_lock
	 *				and so should not sleep.
	 *				2. We should not perform non-idempotent
	 *				operations, and can skip idempotent
	 *				operations that were already carried out
	 *				when resources were first taken for the
	 *				request at initialization time in
	 *				ptlrpc_nrs_req_initialize().
	 *
	 * \retval 0, +ve The level of the returned resource in the resource
	 *		  hierarchy; currently only 0 (for a non-leaf resource)
	 *		  and 1 (for a leaf resource) are supported by the
	 *		  framework.
	 * \retval -ve	  error
	 *
	 * \see ptlrpc_nrs_req_initialize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 */
	int	(*op_res_get)(struct ptlrpc_nrs_policy *policy,
			      struct ptlrpc_nrs_request *nrq,
			      const struct ptlrpc_nrs_resource *parent,
			      struct ptlrpc_nrs_resource **resp,
			      bool moving_req);
	/**
	 * Called when releasing references taken for resources in the resource
	 * hierarchy for the request; this operation is optional.
	 *
	 * \param[in,out] policy The policy the resource belongs to
	 * \param[in]	  res	 The resource to be freed
	 *
	 * \see ptlrpc_nrs_req_finalize()
	 * \see ptlrpc_nrs_hpreq_add_nolock()
	 */
	void	(*op_res_put)(struct ptlrpc_nrs_policy *policy,
			      const struct ptlrpc_nrs_resource *res);

	/**
	 * Obtains a request for handling from the policy, and optionally
	 * removes the request from the policy; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy to poll
	 * \param[in]	  peek	 When set, signifies that we just want to
	 *			 examine the request, and not handle it, so the
	 *			 request is not removed from the policy.
	 * \param[in]	  force	 When set, it will force a policy to return a
	 *			 request if it has one queued.
	 *
	 * \retval NULL		 No request available for handling
	 * \retval valid-pointer The request polled for handling
	 *
	 * \see ptlrpc_nrs_req_get_nolock()
	 */
	struct ptlrpc_nrs_request *
		(*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
			      bool force);
	/**
	 * Called when attempting to add a request to a policy for later
	 * handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy on which to enqueue \a nrq
	 * \param[in,out] nrq	 The request to enqueue
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 *
	 * \see ptlrpc_nrs_req_add_nolock()
	 */
	int	(*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
				  struct ptlrpc_nrs_request *nrq);
	/**
	 * Removes a request from the policy's set of pending requests. Normally
	 * called after a request has been polled successfully from the policy
	 * for handling; this operation is mandatory.
	 *
	 * \param[in,out] policy The policy the request \a nrq belongs to
	 * \param[in,out] nrq	 The request to dequeue
	 */
	void	(*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
				  struct ptlrpc_nrs_request *nrq);
	/**
	 * Called after the request has been carried out. Could be used for
	 * job/resource control; this operation is optional.
	 *
	 * \param[in,out] policy The policy which is stopping to handle request
	 *			 \a nrq
	 * \param[in,out] nrq	 The request
	 *
	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
	 *
	 * \see ptlrpc_nrs_req_stop_nolock()
	 */
	void	(*op_req_stop)(struct ptlrpc_nrs_policy *policy,
			       struct ptlrpc_nrs_request *nrq);
	/**
	 * Registers the policy's lprocfs interface with a PTLRPC service.
	 *
	 * \param[in] svc The service
	 *
	 * \retval 0	success
	 * \retval != 0	error
	 */
	int	(*op_lprocfs_init)(struct ptlrpc_service *svc);
	/**
	 * Unregisters the policy's lprocfs interface with a PTLRPC service.
	 *
	 * In cases of failed policy registration in
	 * \e ptlrpc_nrs_policy_register(), this function may be called for a
	 * service which has not registered the policy successfully, so
	 * implementations of this method should make sure their operations are
	 * safe in such cases.
	 *
	 * \param[in] svc The service
	 */
	void	(*op_lprocfs_fini)(struct ptlrpc_service *svc);
};

/**
 * Policy flags
 */
enum nrs_policy_flags {
	/**
	 * Fallback policy, use this flag only on a single supported policy per
	 * service. The flag cannot be used on policies that use
	 * \e PTLRPC_NRS_FL_REG_EXTERN
	 */
	PTLRPC_NRS_FL_FALLBACK		= (1 << 0),
	/**
	 * Start policy immediately after registering.
	 */
	PTLRPC_NRS_FL_REG_START		= (1 << 1),
	/**
	 * This is a policy registering from a module different to the one NRS
	 * core ships in (currently ptlrpc).
	 */
	PTLRPC_NRS_FL_REG_EXTERN	= (1 << 2),
};

/**
 * NRS queue type.
 *
 * Denotes whether an NRS instance is for handling normal or high-priority
 * RPCs, or whether an operation pertains to one or both of the NRS instances
 * in a service.
 */
enum ptlrpc_nrs_queue_type {
	PTLRPC_NRS_QUEUE_REG	= (1 << 0),
	PTLRPC_NRS_QUEUE_HP	= (1 << 1),
	PTLRPC_NRS_QUEUE_BOTH	= (PTLRPC_NRS_QUEUE_REG | PTLRPC_NRS_QUEUE_HP)
};

/**
 * NRS head
 *
 * A PTLRPC service has at least one NRS head instance for handling normal
 * priority RPCs, and may optionally have a second NRS head instance for
 * handling high-priority RPCs. Each NRS head maintains a list of available
 * policies, of which one and only one policy is acting as the fallback policy,
 * and optionally a different policy may be acting as the primary policy. For
 * all RPCs handled by this NRS head instance, NRS core will first attempt to
 * enqueue the RPC using the primary policy (if any). The fallback policy is
 * used in the following cases:
 * - when there was no primary policy in the
 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state at the time the request
 *   was initialized.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized denoted that it did not wish, or for some other reason
 *   was not able to handle the request, by returning a non-valid NRS resource
 *   reference.
 * - when the primary policy that was at the
 *   ptlrpc_nrs_pol_state::PTLRPC_NRS_POL_STATE_STARTED state at the time the
 *   RPC was initialized, fails later during the request enqueueing stage.
 *
 * \see nrs_resource_get_safe()
 * \see nrs_request_enqueue()
 */
struct ptlrpc_nrs {
	spinlock_t		nrs_lock;
	/** XXX Possibly replace svcpt->scp_req_lock with another lock here. */
	/**
	 * List of registered policies
	 */
	struct list_head	nrs_policy_list;
	/**
	 * List of policies with queued requests. Policies that have any
	 * outstanding requests are queued here, and this list is queried
	 * in a round-robin manner from NRS core when obtaining a request
	 * for handling. This ensures that requests from policies that at some
	 * point transition away from the
	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
	 */
	struct list_head	nrs_policy_queued;
	/**
	 * Service partition for this NRS head
	 */
	struct ptlrpc_service_part *nrs_svcpt;
	/**
	 * Primary policy, which is the preferred policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy *nrs_policy_primary;
	/**
	 * Fallback policy, which is the backup policy for handling RPCs
	 */
	struct ptlrpc_nrs_policy *nrs_policy_fallback;
	/**
	 * This NRS head handles either HP or regular requests
	 */
	enum ptlrpc_nrs_queue_type nrs_queue_type;
	/**
	 * # queued requests from all policies in this NRS head
	 */
	unsigned long		nrs_req_queued;
	/**
	 * # scheduled requests from all policies in this NRS head
	 */
	unsigned long		nrs_req_started;
	/**
	 * # policies on this NRS
	 */
	unsigned		nrs_num_pols;
	/**
	 * This NRS head is in progress of starting a policy
	 */
	unsigned		nrs_policy_starting:1;
	/**
	 * In progress of shutting down the whole NRS head; used during
	 * unregistration
	 */
	unsigned		nrs_stopping:1;
};

#define NRS_POL_NAME_MAX 16

struct ptlrpc_nrs_pol_desc;

/**
 * Service compatibility predicate; this determines whether a policy is
 * adequate for handling RPCs of a particular PTLRPC service.
 *
 * XXX: This should give the same result during policy registration and
 * unregistration, and for all partitions of a service; so the result should
 * not depend on temporal service or other properties that may influence it.
 */
typedef bool (*nrs_pol_desc_compat_t)(const struct ptlrpc_service *svc,
				      const struct ptlrpc_nrs_pol_desc *desc);

struct ptlrpc_nrs_pol_conf {
	/**
	 * Human-readable policy name
	 */
	char				 nc_name[NRS_POL_NAME_MAX];
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops *nc_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		 nc_compat;
	/**
	 * Set for policies that support a single ptlrpc service, i.e. ones that
	 * have \a pd_compat set to nrs_policy_compat_one(). The variable value
	 * depicts the name of the single service that such policies are
	 * compatible with.
	 */
	const char			*nc_compat_svc_name;
	/**
	 * Owner module for this policy descriptor; policies registering from a
	 * different module to the one the NRS framework is held within
	 * (currently ptlrpc), should set this field to THIS_MODULE.
	 */
	struct module			*nc_owner;
	/**
	 * Policy registration flags; a bitmask of \e nrs_policy_flags
	 */
	unsigned			 nc_flags;
};
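
/*
 * Illustrative registration sketch (assuming the NRS registration entry
 * point, e.g. ptlrpc_nrs_policy_register(), and a compatibility predicate
 * such as nrs_policy_compat_all(), declared elsewhere; names below are
 * for illustration only). An external policy module would typically do
 * something like this from its module init function:
 *
 *	static struct ptlrpc_nrs_pol_conf my_pol_conf = {
 *		.nc_name   = "mypolicy",
 *		.nc_ops    = &my_pol_ops,	// struct ptlrpc_nrs_pol_ops
 *		.nc_compat = nrs_policy_compat_all,
 *		.nc_owner  = THIS_MODULE,
 *		.nc_flags  = PTLRPC_NRS_FL_REG_EXTERN,
 *	};
 *
 *	rc = ptlrpc_nrs_policy_register(&my_pol_conf);
 */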

/**
 * NRS policy registering descriptor
 *
 * Is used to hold a description of a policy that can be passed to NRS core in
 * order to register the policy with NRS heads in different PTLRPC services.
 */
struct ptlrpc_nrs_pol_desc {
	/**
	 * Human-readable policy name
	 */
	char				 pd_name[NRS_POL_NAME_MAX];
	/**
	 * Link into nrs_core::nrs_policies
	 */
	struct list_head		 pd_list;
	/**
	 * NRS operations for this policy
	 */
	const struct ptlrpc_nrs_pol_ops *pd_ops;
	/**
	 * Service compatibility predicate
	 */
	nrs_pol_desc_compat_t		 pd_compat;
	/**
	 * Set for policies that are compatible with only one PTLRPC service.
	 *
	 * \see ptlrpc_nrs_pol_conf::nc_compat_svc_name
	 */
	const char			*pd_compat_svc_name;
	/**
	 * Owner module for this policy descriptor.
	 *
	 * We need to hold a reference to the module whenever we might make use
	 * of any of the module's contents, i.e.
	 * - If one or more instances of the policy are at a state where they
	 *   might be handling a request, i.e.
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED or
	 *   ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING as we will have to
	 *   call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference
	 *   is taken on the module when
	 *   \e ptlrpc_nrs_pol_desc::pd_refs becomes 1, and released when it
	 *   becomes 0, so that we hold only one reference to the module maximum
	 *   at any time.
	 *
	 *   We do not need to hold a reference to the module, even though we
	 *   might use code and data from the module, in the following cases:
	 * - During external policy registration, because this should happen in
	 *   the module's init() function, in which case the module is safe from
	 *   removal because a reference is being held on the module by the
	 *   kernel, and iirc kmod (and I guess module-init-tools also) will
	 *   serialize any racing processes properly anyway.
	 * - During external policy unregistration, because this should happen
	 *   in a module's exit() function, and any attempts to start a policy
	 *   instance would need to take a reference on the module, and this is
	 *   not possible once we have reached the point where the exit()
	 *   handler is called.
	 * - During service registration and unregistration, as service setup
	 *   and cleanup, and policy registration, unregistration and policy
	 *   instance starting, are serialized by \e nrs_core::nrs_mutex, so
	 *   as long as users adhere to the convention of registering policies
	 *   in init() and unregistering them in module exit() functions, there
	 *   should not be a race between these operations.
	 * - During any policy-specific lprocfs operations, because a reference
	 *   is held by the kernel on a proc entry that has been entered by a
	 *   syscall, so as long as proc entries are removed during
	 *   unregistration time, then unregistration and lprocfs operations
	 *   will be properly serialized.
	 */
	struct module			*pd_owner;
	/**
	 * Bitmask of \e nrs_policy_flags
	 */
	unsigned			 pd_flags;
	/**
	 * # of references on this descriptor
	 */
	atomic_t			 pd_refs;
};

/**
 * NRS policy state
 *
 * Policies transition from one state to the other during their lifetime
 */
enum ptlrpc_nrs_pol_state {
	/**
	 * Not a valid policy state.
	 */
	NRS_POL_STATE_INVALID,
	/**
	 * Policies are at this state either at the start of their life, or
	 * transition here when the user selects a different policy to act
	 * as the primary one.
	 */
	NRS_POL_STATE_STOPPED,
	/**
	 * Policy is in progress of stopping
	 */
	NRS_POL_STATE_STOPPING,
	/**
	 * Policy is in progress of starting
	 */
	NRS_POL_STATE_STARTING,
	/**
	 * A policy is in this state in two cases:
	 * - it is the fallback policy, which is always in this state.
	 * - it has been activated by the user; i.e. it is the primary policy,
	 */
	NRS_POL_STATE_STARTED,
};

/**
 * NRS policy information
 *
 * Used for obtaining information for the status of a policy via lprocfs
 */
struct ptlrpc_nrs_pol_info {
	/**
	 * Policy name
	 */
	char			pi_name[NRS_POL_NAME_MAX];
	/**
	 * Current policy state
	 */
	enum ptlrpc_nrs_pol_state pi_state;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long			pi_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long			pi_req_started;
	/**
	 * Is this a fallback policy?
	 */
	unsigned		pi_fallback:1;
};

/**
 * NRS policy
 *
 * There is one instance of this for each policy in each NRS head of each
 * PTLRPC service partition.
 */
struct ptlrpc_nrs_policy {
	/**
	 * Linkage into the NRS head's list of policies,
	 * ptlrpc_nrs:nrs_policy_list
	 */
	struct list_head	pol_list;
	/**
	 * Linkage into the NRS head's list of policies with enqueued
	 * requests ptlrpc_nrs:nrs_policy_queued
	 */
	struct list_head	pol_list_queued;
	/**
	 * Current state of this policy
	 */
	enum ptlrpc_nrs_pol_state pol_state;
	/**
	 * Bitmask of nrs_policy_flags
	 */
	unsigned		pol_flags;
	/**
	 * # RPCs enqueued for later dispatching by the policy
	 */
	long			pol_req_queued;
	/**
	 * # RPCs started for dispatch by the policy
	 */
	long			pol_req_started;
	/**
	 * Usage reference count taken on the policy instance
	 */
	long			pol_ref;
	/**
	 * The NRS head this policy has been created at
	 */
	struct ptlrpc_nrs      *pol_nrs;
	/**
	 * Private policy data; varies by policy type
	 */
	void		       *pol_private;
	/**
	 * Policy descriptor for this policy instance.
	 */
	struct ptlrpc_nrs_pol_desc *pol_desc;
};

/**
 * NRS resource
 *
 * Resources are embedded into two types of NRS entities:
 * - Inside NRS policies, in the policy's private data in
 *   ptlrpc_nrs_policy::pol_private
 * - In objects that act as prime-level scheduling entities in different NRS
 *   policies; e.g. on a policy that performs round robin or similar order
 *   scheduling across client NIDs, there would be one NRS resource per unique
 *   client NID. On a policy which performs round robin scheduling across
 *   backend filesystem objects, there would be one resource associated with
 *   each of the backend filesystem objects partaking in the scheduling
 *   performed by the policy.
 *
 * NRS resources share a parent-child relationship, in which resources embedded
 * in policy instances are the parent entities, with all scheduling entities
 * a policy schedules across being the children, thus forming a simple resource
 * hierarchy. This hierarchy may be extended with one or more levels in the
 * future if the ability to have more than one primary policy is added.
 *
 * Upon request initialization, references to the then active NRS policies are
 * taken and used to later handle the dispatching of the request with one of
 * these policies.
 *
 * \see nrs_resource_get_safe()
 * \see ptlrpc_nrs_req_add()
 */
struct ptlrpc_nrs_resource {
	/**
	 * This NRS resource's parent; is NULL for resources embedded in NRS
	 * policy instances; i.e. those are top-level ones.
	 */
	struct ptlrpc_nrs_resource *res_parent;
	/**
	 * The policy associated with this resource.
	 */
	struct ptlrpc_nrs_policy   *res_policy;
};

enum {
	NRS_RES_FALLBACK,
	NRS_RES_PRIMARY,
	NRS_RES_MAX
};

/* \name fifo
 *
 * FIFO policy
 *
 * This policy is a logical wrapper around previous, non-NRS functionality.
 * It dispatches RPCs in the same order as they arrive from the network. This
 * policy is currently used as the fallback policy, and the only enabled policy
 * on all NRS heads of all PTLRPC service partitions.
 * @{
 */

/**
 * Private data structure for the FIFO policy
 */
struct nrs_fifo_head {
	/**
	 * Resource object for policy instance.
	 */
	struct ptlrpc_nrs_resource	fh_res;
	/**
	 * List of queued requests.
	 */
	struct list_head		fh_list;
	/**
	 * For debugging purposes.
	 */
	__u64				fh_sequence;
};

struct nrs_fifo_req {
	struct list_head	fr_list;
	__u64			fr_sequence;
};

/** @} fifo */

/**
 * NRS request
 *
 * Instances of this object exist embedded within ptlrpc_request; the main
 * purpose of this object is to hold references to the request's resources
 * for the lifetime of the request, and to hold properties that policies
 * use for determining the request's scheduling priority.
 */
struct ptlrpc_nrs_request {
	/**
	 * The request's resource hierarchy.
	 */
	struct ptlrpc_nrs_resource     *nr_res_ptrs[NRS_RES_MAX];
	/**
	 * Index into ptlrpc_nrs_request::nr_res_ptrs of the resource of the
	 * policy that was used to enqueue the request.
	 *
	 * \see nrs_request_enqueue()
	 */
	unsigned			nr_res_idx;
	unsigned			nr_initialized:1;
	unsigned			nr_enqueued:1;
	unsigned			nr_started:1;
	unsigned			nr_finalized:1;

	/**
	 * Policy-specific fields, used for determining a request's scheduling
	 * priority, and other supporting functionality.
	 */
	union {
		/**
		 * Fields for the FIFO policy
		 */
		struct nrs_fifo_req	fifo;
	} nr_u;
	/**
	 * Externally-registering policies may want to use this to allocate
	 * their own request properties.
	 */
	void			       *ext;
};

/** @} nrs */

/**
 * Basic request prioritization operations structure.
 * The whole idea is centered around locks and RPCs that might affect locks.
 * When a lock is contended we try to give priority to RPCs that might lead
 * to fastest release of that lock.
 * Currently implemented only for OSTs, in a way that gives all IO and
 * truncate RPCs coming from a locked region where a lock is contended
 * priority over other requests.
 */
struct ptlrpc_hpreq_ops {
	/**
	 * Check if the lock handle of the given lock is the same as
	 * taken from the request.
	 */
	int  (*hpreq_lock_match)(struct ptlrpc_request *, struct ldlm_lock *);
	/**
	 * Check if the request is a high priority one.
	 */
	int  (*hpreq_check)(struct ptlrpc_request *);
	/**
	 * Called after the request has been handled.
	 */
	void (*hpreq_fini)(struct ptlrpc_request *);
};

struct ptlrpc_cli_req {
	/** For bulk requests on client only: bulk descriptor */
	struct ptlrpc_bulk_desc		*cr_bulk;
	/** optional time limit for send attempts */
	long				 cr_delay_limit;
	/** time request was first queued */
	time_t				 cr_queued_time;
	/** request sent timeval */
	struct timespec64		 cr_sent_tv;
	/** time for request really sent out */
	time_t				 cr_sent_out;
	/** when req reply unlink must finish. */
	time_t				 cr_reply_deadline;
	/** when req bulk unlink must finish. */
	time_t				 cr_bulk_deadline;
	/** when req unlink must finish. */
	time_t				 cr_req_deadline;
	/** Portal to which this request would be sent */
	short				 cr_req_ptl;
	/** Portal where to wait for reply and where reply would be sent */
	short				 cr_rep_ptl;
	/** request resending number */
	unsigned int			 cr_resend_nr;
	/** What was import generation when this request was sent */
	int				 cr_imp_gen;
	enum lustre_imp_state		 cr_send_state;
	/** Per-request waitq introduced by bug 21938 for recovery waiting */
	wait_queue_head_t		 cr_set_waitq;
	/** Link item for request set lists */
	struct list_head		 cr_set_chain;
	/** link to waited ctx */
	struct list_head		 cr_ctx_chain;

	/** client's half ctx */
	struct ptlrpc_cli_ctx		*cr_cli_ctx;
	/** Link back to the request set */
	struct ptlrpc_request_set	*cr_set;
	/** outgoing request MD handle */
	lnet_handle_md_t		 cr_req_md_h;
	/** request-out callback parameter */
	struct ptlrpc_cb_id		 cr_req_cbid;
	/** incoming reply MD handle */
	lnet_handle_md_t		 cr_reply_md_h;
	wait_queue_head_t		 cr_reply_waitq;
	/** reply callback parameter */
	struct ptlrpc_cb_id		 cr_reply_cbid;
	/** Async completion handler, called when reply is received */
	ptlrpc_interpterer_t		 cr_reply_interp;
	/** Async completion context */
	union ptlrpc_async_args		 cr_async_args;
	/** Opaque data for replay and commit callbacks. */
	void				*cr_cb_data;
	/**
	 * Commit callback, called when request is committed and about to be
	 * freed.
	 */
	void (*cr_commit_cb)(struct ptlrpc_request *);
	/** Replay callback, called after request is replayed at recovery */
	void (*cr_replay_cb)(struct ptlrpc_request *);
};

/** client request member alias */
/* NB: these aliases should NOT be used by any new code; instead they should
 * be removed step by step to avoid potential abuse
 */
#define rq_bulk			rq_cli.cr_bulk
#define rq_delay_limit		rq_cli.cr_delay_limit
#define rq_queued_time		rq_cli.cr_queued_time
#define rq_sent_tv		rq_cli.cr_sent_tv
#define rq_real_sent		rq_cli.cr_sent_out
#define rq_reply_deadline	rq_cli.cr_reply_deadline
#define rq_bulk_deadline	rq_cli.cr_bulk_deadline
#define rq_req_deadline		rq_cli.cr_req_deadline
#define rq_nr_resend		rq_cli.cr_resend_nr
#define rq_request_portal	rq_cli.cr_req_ptl
#define rq_reply_portal		rq_cli.cr_rep_ptl
#define rq_import_generation	rq_cli.cr_imp_gen
#define rq_send_state		rq_cli.cr_send_state
#define rq_set_chain		rq_cli.cr_set_chain
#define rq_ctx_chain		rq_cli.cr_ctx_chain
#define rq_set			rq_cli.cr_set
#define rq_set_waitq		rq_cli.cr_set_waitq
#define rq_cli_ctx		rq_cli.cr_cli_ctx
#define rq_req_md_h		rq_cli.cr_req_md_h
#define rq_req_cbid		rq_cli.cr_req_cbid
#define rq_reply_md_h		rq_cli.cr_reply_md_h
#define rq_reply_waitq		rq_cli.cr_reply_waitq
#define rq_reply_cbid		rq_cli.cr_reply_cbid
#define rq_interpret_reply	rq_cli.cr_reply_interp
#define rq_async_args		rq_cli.cr_async_args
#define rq_cb_data		rq_cli.cr_cb_data
#define rq_commit_cb		rq_cli.cr_commit_cb
#define rq_replay_cb		rq_cli.cr_replay_cb

struct ptlrpc_srv_req {
	/** initial thread servicing this request */
	struct ptlrpc_thread		*sr_svc_thread;
	/**
	 * Server side list of incoming unserved requests sorted by arrival
	 * time. Traversed from time to time to notice about-to-expire
	 * requests and send back "early replies" to clients to let them
	 * know the server is alive and well, just too busy to service their
	 * requests in time
	 */
	struct list_head		 sr_timed_list;
	/** server-side per-export list */
	struct list_head		 sr_exp_list;
	/** server-side history, used for debugging purposes. */
	struct list_head		 sr_hist_list;
	/** history sequence # */
	__u64				 sr_hist_seq;
	/** the index of service's srv_at_array into which request is linked */
	time_t				 sr_at_index;
	/** authed uid */
	uid_t				 sr_auth_uid;
	/** authed uid mapped to */
	uid_t				 sr_auth_mapped_uid;
	/** RPC is generated from what part of Lustre */
	enum lustre_sec_part		 sr_sp_from;
	/** request session context */
	struct lu_context		 sr_ses;
	/** \addtogroup nrs
	 * @{
	 */
	/** stub for NRS request */
	struct ptlrpc_nrs_request	 sr_nrq;
	/** @} nrs */
	/** request arrival time */
	struct timespec64		 sr_arrival_time;
	/** server's half ctx */
	struct ptlrpc_svc_ctx		*sr_svc_ctx;
	/** (server side), pointed directly into req buffer */
	struct ptlrpc_user_desc		*sr_user_desc;
	/** separated reply state */
	struct ptlrpc_reply_state	*sr_reply_state;
	/** server-side hp handlers */
	struct ptlrpc_hpreq_ops		*sr_ops;
	/** incoming request buffer */
	struct ptlrpc_request_buffer_desc *sr_rqbd;
};

/** server request member alias */
/* NB: these aliases should NOT be used by any new code; instead they should
 * be removed step by step to avoid potential abuse
 */
#define rq_svc_thread		rq_srv.sr_svc_thread
#define rq_timed_list		rq_srv.sr_timed_list
#define rq_exp_list		rq_srv.sr_exp_list
#define rq_history_list		rq_srv.sr_hist_list
#define rq_history_seq		rq_srv.sr_hist_seq
#define rq_at_index		rq_srv.sr_at_index
#define rq_auth_uid		rq_srv.sr_auth_uid
#define rq_auth_mapped_uid	rq_srv.sr_auth_mapped_uid
#define rq_sp_from		rq_srv.sr_sp_from
#define rq_session		rq_srv.sr_ses
#define rq_nrq			rq_srv.sr_nrq
#define rq_arrival_time		rq_srv.sr_arrival_time
#define rq_reply_state		rq_srv.sr_reply_state
#define rq_svc_ctx		rq_srv.sr_svc_ctx
#define rq_user_desc		rq_srv.sr_user_desc
#define rq_ops			rq_srv.sr_ops
#define rq_rqbd			rq_srv.sr_rqbd

/**
 * Represents remote procedure call.
 *
 * This is a staple structure used by everybody wanting to send a request
 * in Lustre.
 */
struct ptlrpc_request {
	/* Request type: one of PTL_RPC_MSG_* */
	int				 rq_type;
	/** Result of request processing */
	int				 rq_status;
	/**
	 * Linkage item through which this request is included into
	 * sending/delayed lists on client and into rqbd list on server
	 */
	struct list_head		 rq_list;
	/** Lock to protect request flags and some other important bits, like
	 * rq_list
	 */
	spinlock_t			 rq_lock;
	/** client-side flags are serialized by rq_lock @{ */
	unsigned int rq_intr:1, rq_replied:1, rq_err:1,
		rq_timedout:1, rq_resend:1, rq_restart:1,
		/**
		 * when ->rq_replay is set, request is kept by the client even
		 * after server commits corresponding transaction. This is
		 * used for operations that require sequence of multiple
		 * requests to be replayed. The only example currently is file
		 * open/close. When last request in such a sequence is
		 * committed, ->rq_replay is cleared on all requests in the
		 * sequence.
		 */
		rq_replay:1,
		rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
		rq_early:1,
		rq_req_unlinked:1,	/* unlinked request buffer from lnet */
		rq_reply_unlinked:1,	/* unlinked reply buffer from lnet */
		rq_memalloc:1,		/* req originated from "kswapd" */
		rq_committed:1,
		rq_reply_truncated:1,
		/** whether the "rq_set" is a valid one */
		rq_invalid_rqset:1,
		rq_generation_set:1,
		/** do not resend request on -EINPROGRESS */
		rq_no_retry_einprogress:1,
		/* allow the req to be sent if the import is in recovery
		 * status
		 */
		rq_allow_replay:1,
		/* bulk request, sent to server, but uncommitted */
		rq_unstable:1;
	/** @} */

	/** server-side flags @{ */
	unsigned int
		rq_hp:1,		/**< high priority RPC */
		rq_at_linked:1,		/**< link into service's srv_at_array */
		rq_packed_final:1;	/**< packed final reply */
	/** @} */

	/** one of RQ_PHASE_* */
	enum rq_phase			 rq_phase;
	/** one of RQ_PHASE_* to be used next */
	enum rq_phase			 rq_next_phase;
	/**
	 * client-side refcount for SENT race, server-side refcount
	 * for multiple replies
	 */
	atomic_t			 rq_refcount;
	/**
	 * client-side:
	 * !rq_truncate : # reply bytes actually received,
	 * rq_truncate : required repbuf_len for resend
	 */
	int				 rq_nob_received;
	/** Request length */
	int				 rq_reqlen;
	/** Reply length */
	int				 rq_replen;
	/** Pool if request is from preallocated list */
	struct ptlrpc_request_pool	*rq_pool;
	/** Request message - what client sent */
	struct lustre_msg		*rq_reqmsg;
	/** Reply message - server response */
	struct lustre_msg		*rq_repmsg;
	/** Transaction number */
	__u64				 rq_transno;
	/** xid */
	__u64				 rq_xid;
	/**
	 * List item for replay list. Not yet committed requests get linked
	 * there.
	 * Also see \a rq_replay comment above.
	 * It is also the link chain on obd_export::exp_req_replay_queue
	 */
	struct list_head		 rq_replay_list;
	/** non-shared members for client & server request*/
	union {
		struct ptlrpc_cli_req	 rq_cli;
		struct ptlrpc_srv_req	 rq_srv;
	};
	/**
	 * security and encryption data
	 * @{
	 */
	/** description of flavors for client & server */
	struct sptlrpc_flavor		 rq_flvr;

	/* client/server security flags */
	unsigned int
		rq_ctx_init:1,		/* context initiation */
		rq_ctx_fini:1,		/* context destroy */
		rq_bulk_read:1,		/* request bulk read */
		rq_bulk_write:1,	/* request bulk write */
		/* server authentication flags */
		rq_auth_gss:1,		/* authenticated by gss */
		rq_auth_usr_root:1,	/* authed as root */
		rq_auth_usr_mdt:1,	/* authed as mdt */
		rq_auth_usr_ost:1,	/* authed as ost */
		/* security tfm flags */
		rq_pack_udesc:1,
		rq_pack_bulk:1,
		/* doesn't expect reply FIXME */
		rq_no_reply:1,
		rq_pill_init:1,		/* pill initialized */
		rq_srv_req:1;		/* server request */

	/** various buffer pointers */
	struct lustre_msg		*rq_reqbuf;	/**< req wrapper */
	char				*rq_repbuf;	/**< rep buffer */
	struct lustre_msg		*rq_repdata;	/**< rep wrapper msg */
	/** only in priv mode */
	struct lustre_msg		*rq_clrbuf;
	int				 rq_reqbuf_len;	 /* req wrapper buf len */
	int				 rq_reqdata_len; /* req wrapper msg len */
	int				 rq_repbuf_len;	 /* rep buffer len */
	int				 rq_repdata_len; /* rep wrapper msg len */
	int				 rq_clrbuf_len;	 /* only in priv mode */
	int				 rq_clrdata_len; /* only in priv mode */

	/** early replies go to offset 0, regular replies go after that */
	unsigned int			 rq_reply_off;

	/** @} */

	/** Fields that help to see if request and reply were swabbed or not */
	__u32				 rq_req_swab_mask;
	__u32				 rq_rep_swab_mask;

	/** how many early replies (for stats) */
	int				 rq_early_count;

	/** Server-side, export on which request was received */
	struct obd_export		*rq_export;
	/** import where request is being sent */
	struct obd_import		*rq_import;
	/** our LNet NID */
	lnet_nid_t			 rq_self;
	/** Peer description (the other side) */
	lnet_process_id_t		 rq_peer;
	/**
	 * service time estimate (secs)
	 * If the request is not served by this time, it is marked as timed out.
	 */
	int				 rq_timeout;
	/**
	 * when request/reply sent (secs), or time when request should be sent
	 */
	time64_t			 rq_sent;
	/** when request must finish. */
1586 time64_t rq_deadline;
Peng Taod7e09d02013-05-02 16:46:55 +08001587 /** request format description */
1588 struct req_capsule rq_pill;
1589};
1590
1591/**
1592 * Call completion handler for rpc if any, return it's status or original
1593 * rc if there was no handler defined for this request.
1594 */
1595static inline int ptlrpc_req_interpret(const struct lu_env *env,
1596 struct ptlrpc_request *req, int rc)
1597{
Oleg Drokind2a13982016-02-16 00:46:52 -05001598 if (req->rq_interpret_reply) {
Peng Taod7e09d02013-05-02 16:46:55 +08001599 req->rq_status = req->rq_interpret_reply(env, req,
1600 &req->rq_async_args,
1601 rc);
1602 return req->rq_status;
1603 }
1604 return rc;
1605}
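
/*
 * Usage sketch (illustrative only, not part of this header): an interpret
 * callback takes the (env, req, args, rc) arguments used in the call above
 * and is installed on the request before it is sent; my_interpret is a
 * hypothetical name.  Whatever the callback returns becomes rq_status:
 *
 *	static int my_interpret(const struct lu_env *env,
 *				struct ptlrpc_request *req,
 *				void *args, int rc)
 *	{
 *		return rc;
 *	}
 *
 *	req->rq_interpret_reply = my_interpret;
 */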

/*
 * Can the request be moved from the regular NRS head to the high-priority NRS
 * head (of the same PTLRPC service partition), if any?
 *
 * For a reliable result, this should be checked under svcpt->scp_req lock.
 */
static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req)
{
	struct ptlrpc_nrs_request *nrq = &req->rq_nrq;

	/**
	 * LU-898: Check ptlrpc_nrs_request::nr_enqueued to make sure the
	 * request has been enqueued first, and ptlrpc_nrs_request::nr_started
	 * to make sure it has not been scheduled yet (analogous to the
	 * previous (non-NRS) checking of !list_empty(&ptlrpc_request::rq_list)).
	 */
	return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp;
}

/** @} nrs */

/**
 * Returns 1 if request buffer at offset \a index was already swabbed
 */
static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
	return req->rq_req_swab_mask & (1 << index);
}

/**
 * Returns 1 if request reply buffer at offset \a index was already swabbed
 */
static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
	return req->rq_rep_swab_mask & (1 << index);
}

/**
 * Returns 1 if request needs to be swabbed into local cpu byteorder
 */
static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req)
{
	return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}

/**
 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
 */
static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req)
{
	return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
}

/**
 * Mark request buffer at offset \a index as already swabbed
 */
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
	LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
	req->rq_req_swab_mask |= 1 << index;
}

/**
 * Mark request reply buffer at offset \a index as already swabbed
 */
static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
{
	LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
	LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
	req->rq_rep_swab_mask |= 1 << index;
}
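
/*
 * Usage sketch (illustrative only): unpacking code typically tests the swab
 * state before byte-swapping a buffer in place, then records the fact so the
 * same buffer is never swabbed twice; "offset" below is a hypothetical
 * buffer index:
 *
 *	if (ptlrpc_rep_need_swab(req) && !lustre_rep_swabbed(req, offset)) {
 *		(byte-swap the reply buffer at that offset in place here)
 *		lustre_set_rep_swabbed(req, offset);
 *	}
 */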

/**
 * Convert numerical request phase value \a phase into text string description
 */
static inline const char *
ptlrpc_phase2str(enum rq_phase phase)
{
	switch (phase) {
	case RQ_PHASE_NEW:
		return "New";
	case RQ_PHASE_RPC:
		return "Rpc";
	case RQ_PHASE_BULK:
		return "Bulk";
	case RQ_PHASE_INTERPRET:
		return "Interpret";
	case RQ_PHASE_COMPLETE:
		return "Complete";
	case RQ_PHASE_UNREG_RPC:
		return "UnregRPC";
	case RQ_PHASE_UNREG_BULK:
		return "UnregBULK";
	default:
		return "?Phase?";
	}
}

/**
 * Convert numerical request phase of the request \a req into text string
 * description
 */
static inline const char *
ptlrpc_rqphase2str(struct ptlrpc_request *req)
{
	return ptlrpc_phase2str(req->rq_phase);
}

/**
 * Debugging functions and helpers to print request structure into debug log
 * @{
 */
/* Spare the preprocessor, spoil the bugs. */
#define FLAG(field, str) (field ? str : "")

/** Convert bit flags into a string */
#define DEBUG_REQ_FLAGS(req)						\
	ptlrpc_rqphase2str(req),					\
	FLAG(req->rq_intr, "I"), FLAG(req->rq_replied, "R"),		\
	FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"),		\
	FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \
	FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"),		\
	FLAG(req->rq_no_resend, "N"),					\
	FLAG(req->rq_waiting, "W"),					\
	FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"),		\
	FLAG(req->rq_committed, "M")

#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s"

void _debug_req(struct ptlrpc_request *req,
		struct libcfs_debug_msg_data *data, const char *fmt, ...)
	__printf(3, 4);

/**
 * Helper that decides if we need to print the request according to current
 * debug level settings
 */
#define debug_req(msgdata, mask, cdls, req, fmt, a...)			      \
do {									      \
	CFS_CHECK_STACK(msgdata, mask, cdls);				      \
									      \
	if (((mask) & D_CANTMASK) != 0 ||				      \
	    ((libcfs_debug & (mask)) != 0 &&				      \
	     (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0))		      \
		_debug_req((req), msgdata, fmt, ##a);			      \
} while (0)

/**
 * This is the debug print function you need to use to print request structure
 * content into the lustre debug log.
 * For most callers (level is a constant) this is resolved at compile time.
 */
#define DEBUG_REQ(level, req, fmt, args...)				      \
do {									      \
	if ((level) & (D_ERROR | D_WARNING)) {				      \
		static struct cfs_debug_limit_state cdls;		      \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, &cdls);	      \
		debug_req(&msgdata, level, &cdls, req, "@@@ "fmt" ", ## args);\
	} else {							      \
		LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, level, NULL);	      \
		debug_req(&msgdata, level, NULL, req, "@@@ "fmt" ", ## args); \
	}								      \
} while (0)
/** @} */
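
/*
 * Usage sketch (illustrative only): DEBUG_REQ() is called like CDEBUG(),
 * with a constant debug level, the request, and a printf-style tail; the
 * request state is appended automatically, as in the D_INFO call in
 * ptlrpc_rqphase_move() later in this header.  "rc" is a hypothetical local:
 *
 *	DEBUG_REQ(D_WARNING, req, "replay failed: rc = %d", rc);
 */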

/**
 * Structure that defines a single page of a bulk transfer
 */
struct ptlrpc_bulk_page {
	/** Linkage to list of pages in a bulk */
	struct list_head bp_link;
	/**
	 * Number of bytes in a page to transfer starting from \a bp_pageoffset
	 */
	int bp_buflen;
	/** offset within a page */
	int bp_pageoffset;
	/** The page itself */
	struct page *bp_page;
};

#define BULK_GET_SOURCE	0
#define BULK_PUT_SINK	1
#define BULK_GET_SINK	2
#define BULK_PUT_SOURCE	3

/**
 * Definition of bulk descriptor.
 * Bulks are special "two phase" RPCs where the initial request message
 * is sent first and is followed by a transfer (or receiving) of a large
 * amount of data to be settled into pages referenced from the bulk descriptors.
 * Bulk transfers (the actual data following the small requests) are done
 * on separate LNet portals.
 * In lustre we use bulk transfers for READ and WRITE transfers from/to OSTs.
 * Another user is readpage for MDT.
 */
struct ptlrpc_bulk_desc {
	/** completed with failure */
	unsigned long bd_failure:1;
	/** {put,get}{source,sink} */
	unsigned long bd_type:2;
	/** client side */
	unsigned long bd_registered:1;
	/** For serialization with callback */
	spinlock_t bd_lock;
	/** Import generation when request for this bulk was sent */
	int bd_import_generation;
	/** LNet portal for this bulk */
	__u32 bd_portal;
	/** Server side - export this bulk created for */
	struct obd_export *bd_export;
	/** Client side - import this bulk was sent on */
	struct obd_import *bd_import;
	/** Back pointer to the request */
	struct ptlrpc_request *bd_req;
	wait_queue_head_t bd_waitq;	/* server side only WQ */
	int bd_iov_count;		/* # entries in bd_iov */
	int bd_max_iov;			/* allocated size of bd_iov */
	int bd_nob;			/* # bytes covered */
	int bd_nob_transferred;		/* # bytes GOT/PUT */

	__u64 bd_last_xid;

	struct ptlrpc_cb_id bd_cbid;	/* network callback info */
	lnet_nid_t bd_sender;		/* stash event::sender */
	int bd_md_count;		/* # valid entries in bd_mds */
	int bd_md_max_brw;		/* max entries in bd_mds */
	/** array of associated MDs */
	lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT];

	/*
	 * encrypt iov, size is either 0 or bd_iov_count.
	 */
	lnet_kiov_t *bd_enc_iov;

	lnet_kiov_t bd_iov[0];
};

enum {
	SVC_STOPPED	= 1 << 0,
	SVC_STOPPING	= 1 << 1,
	SVC_STARTING	= 1 << 2,
	SVC_RUNNING	= 1 << 3,
	SVC_EVENT	= 1 << 4,
	SVC_SIGNAL	= 1 << 5,
};

#define PTLRPC_THR_NAME_LEN	32
/**
 * Definition of server service thread structure
 */
struct ptlrpc_thread {
	/**
	 * List of active threads in svc->srv_threads
	 */
	struct list_head t_link;
	/**
	 * thread-private data (preallocated memory)
	 */
	void *t_data;
	__u32 t_flags;
	/**
	 * service thread index, from ptlrpc_start_threads
	 */
	unsigned int t_id;
	/**
	 * service thread pid
	 */
	pid_t t_pid;
	/**
	 * put watchdog in the structure per thread b=14840
	 *
	 * The Lustre watchdog is removed for the client in the hope
	 * that a generic watchdog can be merged into the kernel.
	 * When that happens, we should add the below back:
	 *
	 * struct lc_watchdog *t_watchdog;
	 */
	/**
	 * the svc this thread belongs to b=18582
	 */
	struct ptlrpc_service_part *t_svcpt;
	wait_queue_head_t t_ctl_waitq;
	struct lu_env *t_env;
	char t_name[PTLRPC_THR_NAME_LEN];
};

static inline int thread_is_init(struct ptlrpc_thread *thread)
{
	return thread->t_flags == 0;
}

static inline int thread_is_stopped(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_STOPPED);
}

static inline int thread_is_stopping(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_STOPPING);
}

static inline int thread_is_starting(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_STARTING);
}

static inline int thread_is_running(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_RUNNING);
}

static inline int thread_is_event(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_EVENT);
}

static inline int thread_is_signal(struct ptlrpc_thread *thread)
{
	return !!(thread->t_flags & SVC_SIGNAL);
}

static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
{
	thread->t_flags &= ~flags;
}

static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
{
	thread->t_flags = flags;
}

static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
{
	thread->t_flags |= flags;
}

static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
					      __u32 flags)
{
	if (thread->t_flags & flags) {
		thread->t_flags &= ~flags;
		return 1;
	}
	return 0;
}
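
/*
 * Usage sketch (illustrative only): a service thread's main loop usually
 * polls these helpers on its own ptlrpc_thread and reports its exit via
 * t_ctl_waitq:
 *
 *	while (!thread_is_stopping(thread))
 *		(handle one incoming request here);
 *
 *	thread_set_flags(thread, SVC_STOPPED);
 *	wake_up(&thread->t_ctl_waitq);
 */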

/**
 * Request buffer descriptor structure.
 * This is a structure that contains one posted request buffer for service.
 * Once data lands in a buffer, the event callback creates the actual request
 * and wakes one of the service threads to process the new incoming request.
 * More than one request can fit into the buffer.
 */
struct ptlrpc_request_buffer_desc {
	/** Link item for rqbds on a service */
	struct list_head rqbd_list;
	/** History of requests for this buffer */
	struct list_head rqbd_reqs;
	/** Back pointer to service for which this buffer is registered */
	struct ptlrpc_service_part *rqbd_svcpt;
	/** LNet descriptor */
	lnet_handle_md_t rqbd_md_h;
	int rqbd_refcount;
	/** The buffer itself */
	char *rqbd_buffer;
	struct ptlrpc_cb_id rqbd_cbid;
	/**
	 * This "embedded" request structure is only used for the
	 * last request to fit into the buffer
	 */
	struct ptlrpc_request rqbd_req;
};

typedef int (*svc_handler_t)(struct ptlrpc_request *req);

struct ptlrpc_service_ops {
	/**
	 * if non-NULL called during thread creation (ptlrpc_start_thread())
	 * to initialize service specific per-thread state.
	 */
	int (*so_thr_init)(struct ptlrpc_thread *thr);
	/**
	 * if non-NULL called during thread shutdown (ptlrpc_main()) to
	 * destruct state created by ->srv_init().
	 */
	void (*so_thr_done)(struct ptlrpc_thread *thr);
	/**
	 * Handler function for incoming requests for this service
	 */
	int (*so_req_handler)(struct ptlrpc_request *req);
	/**
	 * function to determine priority of the request, it's called
	 * on every new request
	 */
	int (*so_hpreq_handler)(struct ptlrpc_request *);
	/**
	 * service-specific print fn
	 */
	void (*so_req_printer)(void *, struct ptlrpc_request *);
};

#ifndef __cfs_cacheline_aligned
/* NB: put it here for reducing patch dependence */
# define __cfs_cacheline_aligned
#endif

/**
 * How many high priority requests to serve before serving one normal
 * priority request
 */
#define PTLRPC_SVC_HP_RATIO 10

/**
 * Definition of PortalRPC service.
 * The service is listening on a particular portal (like a tcp port)
 * and performs actions for a specific server, like IO service for OST
 * or general metadata service for MDS.
 */
struct ptlrpc_service {
	/** serialize sysfs operations */
	spinlock_t srv_lock;
	/** most often accessed fields */
	/** chain thru all services */
	struct list_head srv_list;
	/** service operations table */
	struct ptlrpc_service_ops srv_ops;
	/** only statically allocated strings here; we don't clean them */
	char *srv_name;
	/** only statically allocated strings here; we don't clean them */
	char *srv_thread_name;
	/** service thread list */
	struct list_head srv_threads;
	/** # of threads to create for each partition when initializing */
	int srv_nthrs_cpt_init;
	/** limit of threads number for each partition */
	int srv_nthrs_cpt_limit;
	/** Root of debugfs dir tree for this service */
	struct dentry *srv_debugfs_entry;
	/** Pointer to statistic data for this service */
	struct lprocfs_stats *srv_stats;
	/** # hp per lp reqs to handle */
	int srv_hpreq_ratio;
	/** biggest request to receive */
	int srv_max_req_size;
	/** biggest reply to send */
	int srv_max_reply_size;
	/** size of individual buffers */
	int srv_buf_size;
	/** # buffers to allocate in 1 group */
	int srv_nbuf_per_group;
	/** Local portal on which to receive requests */
	__u32 srv_req_portal;
	/** Portal on the client to send replies to */
	__u32 srv_rep_portal;
	/**
	 * Tags for lu_context associated with this thread, see struct
	 * lu_context.
	 */
	__u32 srv_ctx_tags;
	/** soft watchdog timeout multiplier */
	int srv_watchdog_factor;
	/** under unregister_service */
	unsigned srv_is_stopping:1;

	/** max # request buffers in history per partition */
	int srv_hist_nrqbds_cpt_max;
	/** number of CPTs this service is bound on */
	int srv_ncpts;
	/** CPTs array this service is bound on */
	__u32 *srv_cpts;
	/** 2^srv_cpt_bits >= cfs_cpt_number(srv_cptable) */
	int srv_cpt_bits;
	/** CPT table this service is running over */
	struct cfs_cpt_table *srv_cptable;

	/* sysfs object */
	struct kobject srv_kobj;
	struct completion srv_kobj_unregister;
	/**
	 * partition data for ptlrpc service
	 */
	struct ptlrpc_service_part *srv_parts[0];
};

/**
 * Definition of PortalRPC service partition data.
 * Although a service has only one instance right now, we will have
 * multiple instances very soon (one instance per CPT).
 *
 * it has four locks:
 * \a scp_lock
 *    serialize operations on rqbd and requests waiting for preprocess
 * \a scp_req_lock
 *    serialize operations on active requests sent to this portal
 * \a scp_at_lock
 *    serialize adaptive timeout stuff
 * \a scp_rep_lock
 *    serialize operations on RS list (reply states)
 *
 * We don't have any use-case to take two or more locks at the same time
 * for now, so there is no lock order issue.
 */
struct ptlrpc_service_part {
	/** back reference to owner */
	struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
	/* CPT id, reserved */
	int scp_cpt;
	/** always increasing number */
	int scp_thr_nextid;
	/** # of starting threads */
	int scp_nthrs_starting;
	/** # of stopping threads, reserved for shrinking threads */
	int scp_nthrs_stopping;
	/** # running threads */
	int scp_nthrs_running;
	/** service threads list */
	struct list_head scp_threads;

	/**
	 * serialize the following fields, used for protecting
	 * rqbd list and incoming requests waiting for preprocess,
	 * threads starting & stopping are also protected by this lock.
	 */
	spinlock_t scp_lock __cfs_cacheline_aligned;
	/** total # req buffer descs allocated */
	int scp_nrqbds_total;
	/** # posted request buffers for receiving */
	int scp_nrqbds_posted;
	/** in progress of allocating rqbd */
	int scp_rqbd_allocating;
	/** # incoming reqs */
	int scp_nreqs_incoming;
	/** request buffers to be reposted */
	struct list_head scp_rqbd_idle;
	/** req buffers receiving */
	struct list_head scp_rqbd_posted;
	/** incoming reqs */
	struct list_head scp_req_incoming;
	/** timeout before re-posting reqs, in ticks */
	long scp_rqbd_timeout;
	/**
	 * all threads sleep on this. This wait-queue is signalled when new
	 * incoming request arrives and when difficult reply has to be handled.
	 */
	wait_queue_head_t scp_waitq;

	/** request history */
	struct list_head scp_hist_reqs;
	/** request buffer history */
	struct list_head scp_hist_rqbds;
	/** # request buffers in history */
	int scp_hist_nrqbds;
	/** sequence number for request */
	__u64 scp_hist_seq;
	/** highest seq culled from history */
	__u64 scp_hist_seq_culled;

	/**
	 * serialize the following fields, used for processing requests
	 * sent to this portal
	 */
	spinlock_t scp_req_lock __cfs_cacheline_aligned;
	/** # reqs in either of the NRS heads below */
	/** # reqs being served */
	int scp_nreqs_active;
	/** # HPreqs being served */
	int scp_nhreqs_active;
	/** # hp requests handled */
	int scp_hreq_count;

	/** NRS head for regular requests */
	struct ptlrpc_nrs scp_nrs_reg;
	/** NRS head for HP requests; this is only valid for services that can
	 * handle HP requests
	 */
	struct ptlrpc_nrs *scp_nrs_hp;

	/** AT stuff */
	/** @{ */
	/**
	 * serialize the following fields, used for changes on
	 * adaptive timeout
	 */
	spinlock_t scp_at_lock __cfs_cacheline_aligned;
	/** estimated rpc service time */
	struct adaptive_timeout scp_at_estimate;
	/** reqs waiting for replies */
	struct ptlrpc_at_array scp_at_array;
	/** early reply timer */
	struct timer_list scp_at_timer;
	/** debug */
	unsigned long scp_at_checktime;
	/** check early replies */
	unsigned scp_at_check;
	/** @} */

	/**
	 * serialize the following fields, used for processing
	 * replies for this portal
	 */
	spinlock_t scp_rep_lock __cfs_cacheline_aligned;
	/** all the active replies */
	struct list_head scp_rep_active;
	/** List of free reply_states */
	struct list_head scp_rep_idle;
	/** waitq to run, when adding stuff to srv_free_rs_list */
	wait_queue_head_t scp_rep_waitq;
	/** # 'difficult' replies */
	atomic_t scp_nreps_difficult;
};

#define ptlrpc_service_for_each_part(part, i, svc)			\
	for (i = 0;							\
	     i < (svc)->srv_ncpts &&					\
	     (svc)->srv_parts &&					\
	     ((part) = (svc)->srv_parts[i]); i++)
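
/*
 * Usage sketch (illustrative only): walking every partition of a service;
 * "svc" and "total" are hypothetical locals:
 *
 *	struct ptlrpc_service_part *part;
 *	int i, total = 0;
 *
 *	ptlrpc_service_for_each_part(part, i, svc)
 *		total += part->scp_nreqs_active;
 */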

/**
 * Declaration of ptlrpcd control structure
 */
struct ptlrpcd_ctl {
	/**
	 * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_FORCE)
	 */
	unsigned long pc_flags;
	/**
	 * Thread lock protecting structure fields.
	 */
	spinlock_t pc_lock;
	/**
	 * Start completion.
	 */
	struct completion pc_starting;
	/**
	 * Stop completion.
	 */
	struct completion pc_finishing;
	/**
	 * Thread requests set.
	 */
	struct ptlrpc_request_set *pc_set;
	/**
	 * Thread name used in kthread_run()
	 */
	char pc_name[16];
	/**
	 * Environment for request interpreters to run in.
	 */
	struct lu_env pc_env;
	/**
	 * CPT the thread is bound on.
	 */
	int pc_cpt;
	/**
	 * Index of ptlrpcd thread in the array.
	 */
	int pc_index;
	/**
	 * Pointer to the array of partners' ptlrpcd_ctl structure.
	 */
	struct ptlrpcd_ctl **pc_partners;
	/**
	 * Number of the ptlrpcd's partners.
	 */
	int pc_npartners;
	/**
	 * Record the partner index to be processed next.
	 */
	int pc_cursor;
	/**
	 * Error code if the thread failed to fully start.
	 */
	int pc_error;
};

/* Bits for pc_flags */
enum ptlrpcd_ctl_flags {
	/**
	 * Ptlrpc thread start flag.
	 */
	LIOD_START	= 1 << 0,
	/**
	 * Ptlrpc thread stop flag.
	 */
	LIOD_STOP	= 1 << 1,
	/**
	 * Ptlrpc thread force flag (only stop force so far).
	 * This will cause any inflight rpcs handled by the thread
	 * to be aborted if LIOD_STOP is specified.
	 */
	LIOD_FORCE	= 1 << 2,
	/**
	 * This is a recovery ptlrpc thread.
	 */
	LIOD_RECOVERY	= 1 << 3,
};

/**
 * \addtogroup nrs
 * @{
 *
 * Service compatibility function; the policy is compatible with all services.
 *
 * \param[in] svc  The service the policy is attempting to register with.
 * \param[in] desc The policy descriptor
 *
 * \retval true The policy is compatible with the service
 *
 * \see ptlrpc_nrs_pol_desc::pd_compat()
 */
static inline bool nrs_policy_compat_all(const struct ptlrpc_service *svc,
					 const struct ptlrpc_nrs_pol_desc *desc)
{
	return true;
}

/**
 * Service compatibility function; the policy is compatible with only a specific
 * service which is identified by its human-readable name at
 * ptlrpc_service::srv_name.
 *
 * \param[in] svc  The service the policy is attempting to register with.
 * \param[in] desc The policy descriptor
 *
 * \retval false The policy is not compatible with the service
 * \retval true  The policy is compatible with the service
 *
 * \see ptlrpc_nrs_pol_desc::pd_compat()
 */
static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc,
					 const struct ptlrpc_nrs_pol_desc *desc)
{
	return strcmp(svc->srv_name, desc->pd_compat_svc_name) == 0;
}

/** @} nrs */

/* ptlrpc/events.c */
extern lnet_handle_eq_t ptlrpc_eq_h;
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			lnet_process_id_t *peer, lnet_nid_t *self);
/**
 * These callbacks are invoked by LNet when something happened to
 * the underlying buffer
 * @{
 */
void request_out_callback(lnet_event_t *ev);
void reply_in_callback(lnet_event_t *ev);
void client_bulk_callback(lnet_event_t *ev);
void request_in_callback(lnet_event_t *ev);
void reply_out_callback(lnet_event_t *ev);
/** @} */

/* ptlrpc/connection.c */
struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer,
						lnet_nid_t self,
						struct obd_uuid *uuid);
int ptlrpc_connection_put(struct ptlrpc_connection *c);
struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *);
int ptlrpc_connection_init(void);
void ptlrpc_connection_fini(void);

/* ptlrpc/niobuf.c */
/**
 * Actual interfacing with LNet to put/get/register/unregister stuff
 * @{
 */

int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);

static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
{
	struct ptlrpc_bulk_desc *desc;
	int rc;

	desc = req->rq_bulk;

	if (req->rq_bulk_deadline > ktime_get_real_seconds())
		return 1;

	if (!desc)
		return 0;

	spin_lock(&desc->bd_lock);
	rc = desc->bd_md_count;
	spin_unlock(&desc->bd_lock);
	return rc;
}

#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01
#define PTLRPC_REPLY_EARLY	     0x02
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags);
int ptlrpc_reply(struct ptlrpc_request *req);
int ptlrpc_send_error(struct ptlrpc_request *req, int difficult);
int ptlrpc_error(struct ptlrpc_request *req);
void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
/** @} */

/* ptlrpc/client.c */
/**
 * Client-side portals API. Everything to send requests, receive replies,
 * request queues, request management, etc.
 * @{
 */
void ptlrpc_request_committed(struct ptlrpc_request *req, int force);

void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
			struct ptlrpc_client *);
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid);

int ptlrpc_queue_wait(struct ptlrpc_request *req);
int ptlrpc_replay_req(struct ptlrpc_request *req);
int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
void ptlrpc_abort_inflight(struct obd_import *imp);
void ptlrpc_abort_set(struct ptlrpc_request_set *set);

struct ptlrpc_request_set *ptlrpc_prep_set(void);
struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
					     void *arg);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
int ptlrpc_expired_set(void *data);
void ptlrpc_interrupted_set(void *data);
void ptlrpc_mark_interrupted(struct ptlrpc_request *req);
void ptlrpc_set_destroy(struct ptlrpc_request_set *);
void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *);
void ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
			    struct ptlrpc_request *req);

void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool);
int ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);

struct ptlrpc_request_pool *
ptlrpc_init_rq_pool(int, int,
		    int (*populate_pool)(struct ptlrpc_request_pool *, int));

void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp,
					    const struct req_format *format);
struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp,
						 struct ptlrpc_request_pool *,
						 const struct req_format *);
void ptlrpc_request_free(struct ptlrpc_request *request);
int ptlrpc_request_pack(struct ptlrpc_request *request,
			__u32 version, int opcode);
struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *,
						 const struct req_format *,
						 __u32, int);
int ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
			     __u32 version, int opcode, char **bufs,
			     struct ptlrpc_cli_ctx *ctx);
void ptlrpc_req_finished(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
					      unsigned npages, unsigned max_brw,
					      unsigned type, unsigned portal);
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk, int pin);
static inline void ptlrpc_free_bulk_pin(struct ptlrpc_bulk_desc *bulk)
{
	__ptlrpc_free_bulk(bulk, 1);
}

static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
{
	__ptlrpc_free_bulk(bulk, 0);
}

void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
			     struct page *page, int pageoffset, int len, int);
static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
					     struct page *page, int pageoffset,
					     int len)
{
	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
}

static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
					       struct page *page, int pageoffset,
					       int len)
{
	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
}
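
/*
 * Usage sketch (illustrative only, error handling elided): a client read
 * could attach a one-page bulk to an already packed request.  BULK_PUT_SINK
 * is defined above; OST_BULK_PORTAL is assumed from lustre_idl.h, and
 * "page" is a hypothetical struct page pointer:
 *
 *	struct ptlrpc_bulk_desc *desc;
 *
 *	desc = ptlrpc_prep_bulk_imp(req, 1, 1, BULK_PUT_SINK,
 *				    OST_BULK_PORTAL);
 *	if (!desc)
 *		return -ENOMEM;
 *	ptlrpc_prep_bulk_page_pin(desc, page, 0, PAGE_SIZE);
 */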

void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
				      struct obd_import *imp);
__u64 ptlrpc_next_xid(void);
__u64 ptlrpc_sample_next_xid(void);
__u64 ptlrpc_req_xid(struct ptlrpc_request *request);
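
/*
 * Usage sketch (illustrative only) of the synchronous client path declared
 * above: allocate and pack a request, set the expected reply length, wait
 * for the reply, then drop the reference.  The RQF_OBD_PING format and the
 * LUSTRE_OBD_VERSION/OBD_PING pair are assumed from the layout and wire
 * protocol headers included at the top of this file:
 *
 *	struct ptlrpc_request *req;
 *	int rc;
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
 *					LUSTRE_OBD_VERSION, OBD_PING);
 *	if (!req)
 *		return -ENOMEM;
 *	ptlrpc_request_set_replen(req);
 *	rc = ptlrpc_queue_wait(req);
 *	ptlrpc_req_finished(req);
 *	return rc;
 */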

/* Set of routines to run a function in ptlrpcd context */
void *ptlrpcd_alloc_work(struct obd_import *imp,
			 int (*cb)(const struct lu_env *, void *), void *data);
void ptlrpcd_destroy_work(void *handler);
int ptlrpcd_queue_work(void *handler);
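
/*
 * Usage sketch (illustrative only, error handling of the opaque handler
 * elided): run a callback in ptlrpcd context; my_cb and my_data are
 * hypothetical, with my_cb matching the cb signature declared above:
 *
 *	void *work = ptlrpcd_alloc_work(imp, my_cb, my_data);
 *
 *	ptlrpcd_queue_work(work);
 *	...
 *	ptlrpcd_destroy_work(work);
 */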

/** @} */
struct ptlrpc_service_buf_conf {
	/* nbufs is buffers # to allocate when growing the pool */
	unsigned int bc_nbufs;
	/* buffer size to post */
	unsigned int bc_buf_size;
	/* portal to listen for requests on */
	unsigned int bc_req_portal;
	/* portal to send replies to */
	unsigned int bc_rep_portal;
	/* maximum request size to be accepted for this service */
	unsigned int bc_req_max_size;
	/* maximum reply size this service can ever send */
	unsigned int bc_rep_max_size;
};

struct ptlrpc_service_thr_conf {
	/* threadname should be 8 characters or less - 6 will be added on */
	char *tc_thr_name;
	/* threads increasing factor for each CPU */
	unsigned int tc_thr_factor;
	/* service threads # to start on each partition while initializing */
	unsigned int tc_nthrs_init;
	/*
	 * low water of threads # upper-limit on each partition while running,
	 * service availability may be impacted if the number of threads is
	 * lower than this value. It can be ZERO if the service doesn't require
	 * CPU affinity or there is only one partition.
	 */
	unsigned int tc_nthrs_base;
	/* "soft" limit for total threads number */
	unsigned int tc_nthrs_max;
	/* user specified threads number, it will be validated against
	 * the other members of this structure.
	 */
	unsigned int tc_nthrs_user;
	/* set NUMA node affinity for service threads */
	unsigned int tc_cpu_affinity;
	/* Tags for lu_context associated with service thread */
	__u32 tc_ctx_tags;
};

struct ptlrpc_service_cpt_conf {
	struct cfs_cpt_table *cc_cptable;
	/* string pattern to describe CPTs for a service */
	char *cc_pattern;
};

struct ptlrpc_service_conf {
	/* service name */
	char *psc_name;
	/* soft watchdog timeout multiplier to print stuck service traces */
	unsigned int psc_watchdog_factor;
	/* buffer information */
	struct ptlrpc_service_buf_conf psc_buf;
	/* thread information */
	struct ptlrpc_service_thr_conf psc_thr;
	/* CPU partition information */
	struct ptlrpc_service_cpt_conf psc_cpt;
	/* function table */
	struct ptlrpc_service_ops psc_ops;
};
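
/*
 * Usage sketch (illustrative only): a server-side module typically fills a
 * static conf and passes it to ptlrpc_register_service(), declared below.
 * Every value and name here is hypothetical, not a recommended tuning:
 *
 *	static struct ptlrpc_service_conf conf = {
 *		.psc_name		= "example_svc",
 *		.psc_watchdog_factor	= 1,
 *		.psc_buf		= {
 *			.bc_nbufs	 = 64,
 *			.bc_buf_size	 = 4096,
 *			.bc_req_max_size = 4096,
 *			.bc_rep_max_size = 4096,
 *			.bc_req_portal	 = 0,
 *			.bc_rep_portal	 = 0,
 *		},
 *		.psc_thr		= {
 *			.tc_thr_name	= "ll_ex",
 *			.tc_nthrs_init	= 2,
 *			.tc_nthrs_max	= 8,
 *		},
 *		.psc_ops		= {
 *			.so_req_handler	= example_handler,
 *		},
 *	};
 */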

/* ptlrpc/service.c */
/**
 * Server-side services API. Register/unregister service, request state
 * management, service thread management
 *
 * @{
 */
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
struct ptlrpc_service *ptlrpc_register_service(struct ptlrpc_service_conf *conf,
					       struct kset *parent,
					       struct dentry *debugfs_entry);

int ptlrpc_start_threads(struct ptlrpc_service *svc);
int ptlrpc_unregister_service(struct ptlrpc_service *service);

int ptlrpc_hr_init(void);
void ptlrpc_hr_fini(void);

/** @} */

/* ptlrpc/import.c */
/**
 * Import API
 * @{
 */
int ptlrpc_connect_import(struct obd_import *imp);
int ptlrpc_init_import(struct obd_import *imp);
int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
int ptlrpc_import_recovery_state_machine(struct obd_import *imp);

/* ptlrpc/pack_generic.c */
int ptlrpc_reconnect_import(struct obd_import *imp);
/** @} */

/**
 * ptlrpc msg buffer and swab interface
 *
 * @{
 */
int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout,
			 int index);
void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout,
			    int index);
int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len);
int ptlrpc_unpack_req_msg(struct ptlrpc_request *req, int len);

void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 *lens,
			char **bufs);
int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count,
			__u32 *lens, char **bufs);
int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens,
		      char **bufs);
int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
			 __u32 *lens, char **bufs, int flags);
#define LPRFL_EARLY_REPLY 1
int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens,
			    char **bufs, int flags);
int lustre_shrink_msg(struct lustre_msg *msg, int segment,
		      unsigned int newlen, int move_data);
void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
int __lustre_unpack_msg(struct lustre_msg *m, int len);
int lustre_msg_hdr_size(__u32 magic, int count);
int lustre_msg_size(__u32 magic, int count, __u32 *lengths);
int lustre_msg_size_v2(int count, __u32 *lengths);
int lustre_packed_msg_size(struct lustre_msg *msg);
int lustre_msg_early_size(void);
void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size);
void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
int lustre_msg_buflen(struct lustre_msg *m, int n);
int lustre_msg_bufcount(struct lustre_msg *m);
char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_flags(struct lustre_msg *msg);
void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
void lustre_msg_clear_flags(struct lustre_msg *msg, int flags);
__u32 lustre_msg_get_op_flags(struct lustre_msg *msg);
void lustre_msg_add_op_flags(struct lustre_msg *msg, int flags);
struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg);
__u32 lustre_msg_get_type(struct lustre_msg *msg);
void lustre_msg_add_version(struct lustre_msg *msg, int version);
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
__u64 lustre_msg_get_transno(struct lustre_msg *msg);
__u64 lustre_msg_get_slv(struct lustre_msg *msg);
__u32 lustre_msg_get_limit(struct lustre_msg *msg);
void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv);
void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit);
int lustre_msg_get_status(struct lustre_msg *msg);
__u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg);
__u32 lustre_msg_get_magic(struct lustre_msg *msg);
__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg);
void lustre_msg_set_handle(struct lustre_msg *msg,
			   struct lustre_handle *handle);
void lustre_msg_set_type(struct lustre_msg *msg, __u32 type);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
void ptlrpc_request_set_replen(struct ptlrpc_request *req);
void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);

static inline void
lustre_shrink_reply(struct ptlrpc_request *req, int segment,
		    unsigned int newlen, int move_data)
{
	LASSERT(req->rq_reply_state);
	LASSERT(req->rq_repmsg);
	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
					   newlen, move_data);
}

#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS

static inline int ptlrpc_status_hton(int h)
{
	/*
	 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
	 * ELDLM_LOCK_ABORTED, etc.
	 */
	if (h < 0)
		return -lustre_errno_hton(-h);
	else
		return h;
}

static inline int ptlrpc_status_ntoh(int n)
{
	/*
	 * See the comment in ptlrpc_status_hton().
	 */
	if (n < 0)
		return -lustre_errno_ntoh(-n);
	else
		return n;
}

#else

#define ptlrpc_status_hton(h) (h)
#define ptlrpc_status_ntoh(n) (n)

#endif
/** @} */

/** Change request phase of \a req to \a new_phase */
static inline void
ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
{
	if (req->rq_phase == new_phase)
		return;

	if (new_phase == RQ_PHASE_UNREG_RPC ||
	    new_phase == RQ_PHASE_UNREG_BULK) {
		/* No embedded unregistering phases */
		if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
		    req->rq_phase == RQ_PHASE_UNREG_BULK)
			return;

		req->rq_next_phase = req->rq_phase;
		if (req->rq_import)
			atomic_inc(&req->rq_import->imp_unregistering);
	}

	if (req->rq_phase == RQ_PHASE_UNREG_RPC ||
	    req->rq_phase == RQ_PHASE_UNREG_BULK) {
		if (req->rq_import)
			atomic_dec(&req->rq_import->imp_unregistering);
	}

	DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
		  ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));

	req->rq_phase = new_phase;
}

/**
 * Returns true if request \a req got an early reply and the hard deadline is
 * not yet met
 */
static inline int
ptlrpc_client_early(struct ptlrpc_request *req)
{
	return req->rq_early;
}

/**
 * Returns true if we got a real reply from the server for this request
 */
static inline int
ptlrpc_client_replied(struct ptlrpc_request *req)
{
	if (req->rq_reply_deadline > ktime_get_real_seconds())
		return 0;
	return req->rq_replied;
}

/** Returns true if request \a req is in the process of receiving the server
 * reply
 */
static inline int
ptlrpc_client_recv(struct ptlrpc_request *req)
{
	if (req->rq_reply_deadline > ktime_get_real_seconds())
		return 1;
	return req->rq_receiving_reply;
}

static inline int
ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
{
	int rc;

	spin_lock(&req->rq_lock);
	if (req->rq_reply_deadline > ktime_get_real_seconds()) {
		spin_unlock(&req->rq_lock);
		return 1;
	}
	if (req->rq_req_deadline > ktime_get_real_seconds()) {
		spin_unlock(&req->rq_lock);
		return 1;
	}
	rc = !req->rq_req_unlinked || !req->rq_reply_unlinked ||
	     req->rq_receiving_reply;
	spin_unlock(&req->rq_lock);
	return rc;
}

static inline void
ptlrpc_client_wake_req(struct ptlrpc_request *req)
{
	if (!req->rq_set)
		wake_up(&req->rq_reply_waitq);
	else
		wake_up(&req->rq_set->set_waitq);
}

static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_refcount) > 0);
	atomic_inc(&rs->rs_refcount);
}

static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
	LASSERT(atomic_read(&rs->rs_refcount) > 0);
	if (atomic_dec_and_test(&rs->rs_refcount))
		lustre_free_reply_state(rs);
}

/* Should only be called once per req */
static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req)
{
	if (!req->rq_reply_state)
		return; /* shouldn't occur */
	ptlrpc_rs_decref(req->rq_reply_state);
	req->rq_reply_state = NULL;
	req->rq_repmsg = NULL;
}

static inline __u32 lustre_request_magic(struct ptlrpc_request *req)
{
	return lustre_msg_get_magic(req->rq_reqmsg);
}

static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req)
{
	switch (req->rq_reqmsg->lm_magic) {
	case LUSTRE_MSG_MAGIC_V2:
		return req->rq_reqmsg->lm_repsize;
	default:
		LASSERTF(0, "incorrect message magic: %08x\n",
			 req->rq_reqmsg->lm_magic);
		return -EFAULT;
	}
}

static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req)
{
	if (req->rq_delay_limit != 0 &&
	    time_before(cfs_time_add(req->rq_queued_time,
				     cfs_time_seconds(req->rq_delay_limit)),
			cfs_time_current())) {
		return 1;
	}
	return 0;
}

static inline int ptlrpc_no_resend(struct ptlrpc_request *req)
{
	if (!req->rq_no_resend && ptlrpc_send_limit_expired(req)) {
		spin_lock(&req->rq_lock);
		req->rq_no_resend = 1;
		spin_unlock(&req->rq_lock);
	}
	return req->rq_no_resend;
}

static inline int
ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
{
	int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);

	return svcpt->scp_service->srv_watchdog_factor *
	       max_t(int, at, obd_timeout);
}
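
/*
 * Worked example (illustrative only): with srv_watchdog_factor == 2, an
 * adaptive estimate of 10s and obd_timeout of 100s, the helper above
 * returns 2 * max(10, 100) = 200 seconds.
 */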

static inline struct ptlrpc_service *
ptlrpc_req2svc(struct ptlrpc_request *req)
{
	return req->rq_rqbd->rqbd_svcpt->scp_service;
}

/* ldlm/ldlm_lib.c */
/**
 * Target client logic
 * @{
 */
int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg);
int client_obd_cleanup(struct obd_device *obddev);
int client_connect_import(const struct lu_env *env,
			  struct obd_export **exp, struct obd_device *obd,
			  struct obd_uuid *cluuid, struct obd_connect_data *,
			  void *localdata);
int client_disconnect_export(struct obd_export *exp);
int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
			   int priority);
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
			    struct obd_uuid *uuid);
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
void client_destroy_import(struct obd_import *imp);
/** @} */

/* ptlrpc/pinger.c */
/**
 * Pinger API (client side only)
 * @{
 */
enum timeout_event {
	TIMEOUT_GRANT = 1
};

struct timeout_item;
typedef int (*timeout_cb_t)(struct timeout_item *, void *);
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
			      timeout_cb_t cb, void *data,
			      struct list_head *obd_list);
int ptlrpc_del_timeout_client(struct list_head *obd_list,
			      enum timeout_event event);
struct ptlrpc_request *ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
void ptlrpc_pinger_ir_up(void);
void ptlrpc_pinger_ir_down(void);
/** @} */
int ptlrpc_pinger_suppress_pings(void);

/* ptlrpc/ptlrpcd.c */
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
void ptlrpcd_free(struct ptlrpcd_ctl *pc);
void ptlrpcd_wake(struct ptlrpc_request *req);
void ptlrpcd_add_req(struct ptlrpc_request *req);
int ptlrpcd_addref(void);
void ptlrpcd_decref(void);

/* ptlrpc/lproc_ptlrpc.c */
/**
 * procfs output related functions
 * @{
 */
const char *ll_opcode2str(__u32 opcode);
void ptlrpc_lprocfs_register_obd(struct obd_device *obd);
void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd);
void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes);
/** @} */

/* ptlrpc/llog_client.c */
extern struct llog_operations llog_client_ops;
/** @} net */

#endif
/** @} PtlRPC */