/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_private.h
 *
 * Various defines for libcfs.
 */

#ifndef __LIBCFS_PRIVATE_H__
#define __LIBCFS_PRIVATE_H__

/* XXX this layering violation is for nidstrings */
#include <linux/lnet/types.h>

#ifndef DEBUG_SUBSYSTEM
# define DEBUG_SUBSYSTEM S_UNDEFINED
#endif

/*
 * When this is on, the LASSERT macro also catches an assignment ("=")
 * used in place of an equality check ("=="), but loses the unlikely()
 * annotation. Turn it on occasionally for test builds; it should never
 * be enabled in a production release.
 */
#define LASSERT_CHECKED (0)

#define LASSERTF(cond, fmt, ...)					\
do {									\
	if (unlikely(!(cond))) {					\
		LIBCFS_DEBUG_MSG_DATA_DECL(__msg_data, D_EMERG, NULL);	\
		libcfs_debug_msg(&__msg_data,				\
				 "ASSERTION( %s ) failed: " fmt, #cond,	\
				 ## __VA_ARGS__);			\
		lbug_with_loc(&__msg_data);				\
	}								\
} while (0)

#define LASSERT(cond) LASSERTF(cond, "\n")
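
/*
 * Usage sketch (illustrative only; the function and fields below are
 * hypothetical, not part of this header):
 *
 *	static void foo_consume(struct foo *f, int nob)
 *	{
 *		LASSERT(f != NULL);
 *		LASSERTF(nob > 0 && nob <= f->f_size,
 *			 "nob %d outside (0, %d]\n", nob, f->f_size);
 *	}
 */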

#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
 * LINVRNT() is for checks too expensive to leave enabled all the time;
 * it must be turned on explicitly with the
 * CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK option.
 */
# define LINVRNT(exp) LASSERT(exp)
#else
# define LINVRNT(exp) ((void)sizeof(!!(exp)))
#endif

#define KLASSERT(e) LASSERT(e)

void lbug_with_loc(struct libcfs_debug_msg_data *) __attribute__((noreturn));

#define LBUG()								\
do {									\
	LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_EMERG, NULL);		\
	lbug_with_loc(&msgdata);					\
} while (0)

/*
 * Memory
 */
extern atomic_t libcfs_kmemory;

#define libcfs_kmem_inc(ptr, size)		\
do {						\
	atomic_add(size, &libcfs_kmemory);	\
} while (0)

#define libcfs_kmem_dec(ptr, size)		\
do {						\
	atomic_sub(size, &libcfs_kmemory);	\
} while (0)

#define libcfs_kmem_read()			\
	atomic_read(&libcfs_kmemory)

#ifndef LIBCFS_VMALLOC_SIZE
#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */
#endif

#define LIBCFS_ALLOC_PRE(size, mask)					\
do {									\
	LASSERT(!in_interrupt() ||					\
		((size) <= LIBCFS_VMALLOC_SIZE &&			\
		 ((mask) & GFP_ATOMIC) != 0));				\
} while (0)

#define LIBCFS_ALLOC_POST(ptr, size)					\
do {									\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LNET: out of memory at %s:%d (tried to alloc '" \
		       #ptr "' = %d)\n", __FILE__, __LINE__, (int)(size)); \
		CERROR("LNET: %d total bytes allocated by lnet\n",	\
		       libcfs_kmem_read());				\
	} else {							\
		memset((ptr), 0, (size));				\
		libcfs_kmem_inc((ptr), (size));				\
		CDEBUG(D_MALLOC, "alloc '" #ptr "': %d at %p (tot %d).\n", \
		       (int)(size), (ptr), libcfs_kmem_read());		\
	}								\
} while (0)

/**
 * allocate memory with GFP flags @mask
 */
#define LIBCFS_ALLOC_GFP(ptr, size, mask)				\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc((size), (mask)) : vmalloc(size);		\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/**
 * default allocator
 */
#define LIBCFS_ALLOC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, __GFP_IO)

/**
 * non-sleeping allocator
 */
#define LIBCFS_ALLOC_ATOMIC(ptr, size) \
	LIBCFS_ALLOC_GFP(ptr, size, GFP_ATOMIC)

/**
 * allocate memory for specified CPU partition
 *   \a cptab != NULL, \a cpt is CPU partition id of \a cptab
 *   \a cptab == NULL, \a cpt is HW NUMA node id
 */
#define LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, mask)		\
do {									\
	LIBCFS_ALLOC_PRE((size), (mask));				\
	(ptr) = (size) <= LIBCFS_VMALLOC_SIZE ?				\
		kmalloc_node((size), (mask),				\
			     cfs_cpt_spread_node(cptab, cpt)) :		\
		vmalloc_node(size, cfs_cpt_spread_node(cptab, cpt));	\
	LIBCFS_ALLOC_POST((ptr), (size));				\
} while (0)

/** default numa allocator */
#define LIBCFS_CPT_ALLOC(ptr, cptab, cpt, size) \
	LIBCFS_CPT_ALLOC_GFP(ptr, cptab, cpt, size, __GFP_IO)

#define LIBCFS_FREE(ptr, size)						\
do {									\
	int s = (size);							\
	if (unlikely((ptr) == NULL)) {					\
		CERROR("LIBCFS: free NULL '" #ptr "' (%d bytes) at "	\
		       "%s:%d\n", s, __FILE__, __LINE__);		\
		break;							\
	}								\
	libcfs_kmem_dec((ptr), s);					\
	CDEBUG(D_MALLOC, "kfreed '" #ptr "': %d at %p (tot %d).\n",	\
	       s, (ptr), libcfs_kmem_read());				\
	if (unlikely(s > LIBCFS_VMALLOC_SIZE))				\
		vfree(ptr);						\
	else								\
		kfree(ptr);						\
} while (0)
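
/*
 * Usage sketch (hypothetical caller): a LIBCFS_ALLOC() must be paired
 * with a LIBCFS_FREE() of the same size, since the size both selects
 * kmalloc/kfree vs. vmalloc/vfree and keeps libcfs_kmemory balanced:
 *
 *	struct foo_table *tab;
 *
 *	LIBCFS_ALLOC(tab, sizeof(*tab));	// zeroed on success
 *	if (tab == NULL)
 *		return -ENOMEM;
 *	// ... use tab ...
 *	LIBCFS_FREE(tab, sizeof(*tab));
 */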

/******************************************************************************/

/* htonl hack - either this, or compile with -O2. Stupid byteorder/generic.h */
#if defined(__GNUC__) && (__GNUC__ >= 2) && !defined(__OPTIMIZE__)
#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)
#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
#endif

void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
int libcfs_debug_init(unsigned long bufsize);
int libcfs_debug_cleanup(void);
int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);

void libcfs_debug_set_level(unsigned int debug_level);

/*
 * Allocate per-CPU-partition data: the returned value is an array of
 * pointers that can be indexed by CPU partition ID.
 *   cptable != NULL: array size is the number of CPU partitions
 *   cptable == NULL: array size is the number of HW cores
 */
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
 * destroy per-cpu-partition variable
 */
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
void *cfs_percpt_current(void *vars);
void *cfs_percpt_index(void *vars, int idx);

#define cfs_percpt_for_each(var, i, vars)		\
	for (i = 0; i < cfs_percpt_number(vars) &&	\
		    ((var) = (vars)[i]) != NULL; i++)
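
/*
 * Usage sketch (hypothetical type): allocate one counter per CPU
 * partition, then visit every instance with cfs_percpt_for_each():
 *
 *	struct my_counter **counters;
 *	struct my_counter *cnt;
 *	int i;
 *
 *	counters = cfs_percpt_alloc(cptab, sizeof(*cnt));
 *	if (counters == NULL)
 *		return -ENOMEM;
 *	cfs_percpt_for_each(cnt, i, counters)
 *		cnt->mc_value = 0;
 *	cfs_percpt_free(counters);
 */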

/*
 * Allocate an array of variables: the returned value is an array of
 * pointers; the caller specifies the array length with \a count.
 */
void *cfs_array_alloc(int count, unsigned int size);
void cfs_array_free(void *vars);

#define LASSERT_ATOMIC_ENABLED (1)

#if LASSERT_ATOMIC_ENABLED

/** assert value of @a is equal to @v */
#define LASSERT_ATOMIC_EQ(a, v)				\
do {							\
	LASSERTF(atomic_read(a) == v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is not equal to @v */
#define LASSERT_ATOMIC_NE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) != v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than @v */
#define LASSERT_ATOMIC_LT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) < v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is less than or equal to @v */
#define LASSERT_ATOMIC_LE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) <= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v */
#define LASSERT_ATOMIC_GT(a, v)				\
do {							\
	LASSERTF(atomic_read(a) > v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than or equal to @v */
#define LASSERT_ATOMIC_GE(a, v)				\
do {							\
	LASSERTF(atomic_read(a) >= v,			\
		 "value: %d\n", atomic_read((a)));	\
} while (0)

/** assert value of @a is greater than @v1 and less than @v2 */
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than @v1 and less than or equal to @v2 */
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v > v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is greater than or equal to @v1 and less than @v2 */
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v < v2, "value: %d\n", __v);	\
} while (0)

/** assert value of @a is within [@v1, @v2] inclusive */
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)				\
do {								\
	int __v = atomic_read(a);				\
	LASSERTF(__v >= v1 && __v <= v2, "value: %d\n", __v);	\
} while (0)

#else /* !LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_EQ(a, v)			do {} while (0)
#define LASSERT_ATOMIC_NE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_LE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GE(a, v)			do {} while (0)
#define LASSERT_ATOMIC_GT_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GT_LE(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LT(a, v1, v2)		do {} while (0)
#define LASSERT_ATOMIC_GE_LE(a, v1, v2)		do {} while (0)

#endif /* LASSERT_ATOMIC_ENABLED */

#define LASSERT_ATOMIC_ZERO(a)			LASSERT_ATOMIC_EQ(a, 0)
#define LASSERT_ATOMIC_POS(a)			LASSERT_ATOMIC_GT(a, 0)
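
/*
 * Example (hypothetical object): sanity-checking a reference count with
 * the assertions above:
 *
 *	LASSERT_ATOMIC_POS(&obj->o_refcount);		 // a ref must be held
 *	LASSERT_ATOMIC_GT_LT(&obj->o_refcount, 0, 1024); // and look sane
 */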

#define CFS_ALLOC_PTR(ptr)	LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr)	LIBCFS_FREE(ptr, sizeof(*(ptr)))

/*
 * percpu partition lock
 *
 * There are some use-cases like this in Lustre:
 * . each CPU partition has its own private data which is frequently
 *   changed, and mostly by the local CPU partition
 * . all CPU partitions share some global data, which is rarely changed
 *
 * LNet is a typical example.
 * The CPU partition lock is designed for this kind of use-case:
 * . each CPU partition has its own private lock
 * . a change to private data just takes the private lock
 * . a read of shared data takes _any_ one of the private locks
 * . a change to shared data takes _all_ the private locks,
 *   which is slow and should be really rare
 */

enum {
	CFS_PERCPT_LOCK_EX = -1, /* negative */
};

struct cfs_percpt_lock {
	/* cpu-partition-table for this lock */
	struct cfs_cpt_table	*pcl_cptab;
	/* exclusively locked */
	unsigned int		pcl_locked;
	/* private lock table */
	spinlock_t		**pcl_locks;
};

/* return number of private locks */
static inline int
cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
{
	return cfs_cpt_number(pcl->pcl_cptab);
}

/*
 * create a cpu-partition lock based on CPU partition table \a cptab,
 * with one private lock per partition
 */
struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
/* destroy a cpu-partition lock */
void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);

/* lock private lock \a index of \a pcl */
void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
/* unlock private lock \a index of \a pcl */
void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
/* create percpt (atomic) refcount based on @cptab */
atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
/* destroy percpt refcount */
void cfs_percpt_atomic_free(atomic_t **refs);
/* return sum of all percpu refs */
int cfs_percpt_atomic_summary(atomic_t **refs);
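
/*
 * Usage sketch (hypothetical data; assumes, per the design notes above,
 * that locking index CFS_PERCPT_LOCK_EX takes every private lock):
 *
 *	// frequent, partition-local update
 *	cfs_percpt_lock(pcl, cpt);
 *	private_data[cpt]->pd_count++;
 *	cfs_percpt_unlock(pcl, cpt);
 *
 *	// rare, global update: exclusive across all partitions
 *	cfs_percpt_lock(pcl, CFS_PERCPT_LOCK_EX);
 *	shared_generation++;
 *	cfs_percpt_unlock(pcl, CFS_PERCPT_LOCK_EX);
 */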

/**
 * Compile-time assertion.
 *
 * Check an invariant described by a constant expression at compile time by
 * forcing a compiler error if it does not hold. \a cond must be a constant
 * expression as defined by the ISO C Standard:
 *
 *	6.8.4.2 The switch statement
 *	....
 *	[#3] The expression of each case label shall be an integer
 *	constant expression and no two of the case constant
 *	expressions in the same switch statement shall have the same
 *	value after conversion...
 */
#define CLASSERT(cond) do { switch (42) { case (cond): case 0: break; } } while (0)
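
/*
 * Example: a false condition makes "case (cond):" collide with "case 0:"
 * and the build fails; a true condition compiles to nothing:
 *
 *	CLASSERT(sizeof(lnet_nid_t) == 8);	// OK
 *	CLASSERT(sizeof(int) == 99);		// error: duplicate case value
 */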

/* support declarations needed by both the kernel and liblustre */
int libcfs_isknown_lnd(int type);
char *libcfs_lnd2modname(int type);
char *libcfs_lnd2str(int type);
int libcfs_str2lnd(const char *str);
char *libcfs_net2str(__u32 net);
char *libcfs_nid2str(lnet_nid_t nid);
__u32 libcfs_str2net(const char *str);
lnet_nid_t libcfs_str2nid(const char *str);
int libcfs_str2anynid(lnet_nid_t *nid, const char *str);
char *libcfs_id2str(lnet_process_id_t id);
void cfs_free_nidlist(struct list_head *list);
int cfs_parse_nidlist(char *str, int len, struct list_head *list);
int cfs_match_nid(lnet_nid_t nid, struct list_head *list);

/** \addtogroup lnet_addr
 * @{ */
/* how an LNET NID encodes net:address */
/** extract the address part of an lnet_nid_t */
#define LNET_NIDADDR(nid)	((__u32)((nid) & 0xffffffff))
/** extract the network part of an lnet_nid_t */
#define LNET_NIDNET(nid)	((__u32)(((nid) >> 32)) & 0xffffffff)
/** make an lnet_nid_t from a network part and an address part */
#define LNET_MKNID(net, addr)	((((__u64)(net)) << 32) | ((__u64)(addr)))
/* how net encodes type:number */
#define LNET_NETNUM(net)	((net) & 0xffff)
#define LNET_NETTYP(net)	(((net) >> 16) & 0xffff)
#define LNET_MKNET(typ, num)	((((__u32)(typ)) << 16) | ((__u32)(num)))
/** @} lnet_addr */
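
/*
 * Example: composing and decomposing a NID for address 0x0a000001
 * (10.0.0.1) on socklnd network number 1 ("tcp1"):
 *
 *	__u32      net = LNET_MKNET(SOCKLND, 1);	// type:number
 *	lnet_nid_t nid = LNET_MKNID(net, 0x0a000001);	// net:address
 *
 *	LASSERT(LNET_NIDNET(nid) == net);
 *	LASSERT(LNET_NIDADDR(nid) == 0x0a000001);
 *	LASSERT(LNET_NETTYP(net) == SOCKLND);
 *	LASSERT(LNET_NETNUM(net) == 1);
 */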

/* max value for numeric network address */
#define MAX_NUMERIC_VALUE 0xffffffff

/* implication */
#define ergo(a, b) (!(a) || (b))
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
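
/*
 * Example: ergo() reads as "a implies b", which keeps invariants terse
 * (the names below are hypothetical):
 *
 *	LASSERT(ergo(page != NULL, nob > 0));
 *	LINVRNT(equi(list_empty(&q->q_list), q->q_size == 0));
 */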

/* --------------------------------------------------------------------
 * Light-weight trace
 * Support for temporary event tracing with minimal Heisenberg effect.
 * All LWT definitions live in arch/kp30.h.
 * -------------------------------------------------------------------- */

struct libcfs_device_userstate {
	int		ldu_memhog_pages;
	struct page	*ldu_memhog_root_page;
};

/* what used to be in portals_lib.h */
#ifndef MIN
# define MIN(a, b) (((a) < (b)) ? (a) : (b))
#endif
#ifndef MAX
# define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

#define MKSTR(ptr) ((ptr) ? (ptr) : "")

static inline int cfs_size_round4(int val)
{
	return (val + 3) & (~0x3);
}

#ifndef HAVE_CFS_SIZE_ROUND
static inline int cfs_size_round(int val)
{
	return (val + 7) & (~0x7);
}
#define HAVE_CFS_SIZE_ROUND
#endif

static inline int cfs_size_round16(int val)
{
	return (val + 0xf) & (~0xf);
}

static inline int cfs_size_round32(int val)
{
	return (val + 0x1f) & (~0x1f);
}

static inline int cfs_size_round0(int val)
{
	if (!val)
		return 0;
	return (val + 1 + 7) & (~0x7);
}

static inline size_t cfs_round_strlen(char *fset)
{
	return (size_t)cfs_size_round((int)strlen(fset) + 1);
}
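
/*
 * Example values: each helper rounds up to the alignment in its name
 * (cfs_size_round() to 8); cfs_size_round0() also reserves one byte
 * for a terminating NUL before rounding:
 *
 *	cfs_size_round4(5)   == 8
 *	cfs_size_round(10)   == 16
 *	cfs_size_round16(17) == 32
 *	cfs_size_round0(8)   == 16	// (8 + 1 + 7) & ~7
 */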

/* round up \a val to the next power of 2 */
static inline unsigned int cfs_power2_roundup(unsigned int val)
{
	if (val != LOWEST_BIT_SET(val)) { /* not a power of 2 already */
		do {
			val &= ~LOWEST_BIT_SET(val);
		} while (val != LOWEST_BIT_SET(val));
		/* ...and round up */
		val <<= 1;
	}
	return val;
}
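
/*
 * Example: a power of 2 is returned unchanged, anything else is rounded
 * up to the next one:
 *
 *	cfs_power2_roundup(8) == 8
 *	cfs_power2_roundup(9) == 16
 */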

#define LOGL(var, len, ptr)						\
do {									\
	if (var)							\
		memcpy((char *)ptr, (const char *)var, len);		\
	ptr += cfs_size_round(len);					\
} while (0)

#define LOGU(var, len, ptr)						\
do {									\
	if (var)							\
		memcpy((char *)var, (const char *)ptr, len);		\
	ptr += cfs_size_round(len);					\
} while (0)

#define LOGL0(var, len, ptr)						\
do {									\
	if (!len)							\
		break;							\
	memcpy((char *)ptr, (const char *)var, len);			\
	*((char *)(ptr) + len) = 0;					\
	ptr += cfs_size_round(len + 1);					\
} while (0)
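
/*
 * Usage sketch (hypothetical buffer): LOGL() packs variable-length
 * fields at cfs_size_round() boundaries and LOGU() unpacks them, so a
 * writer and a reader advance their cursors identically:
 *
 *	char *p = buf;
 *	LOGL(name, namelen, p);		// copy out, cursor rounds up
 *	LOGL(value, vallen, p);
 *
 *	char *q = buf;
 *	LOGU(name, namelen, q);		// copy back in, same rounding
 *	LOGU(value, vallen, q);
 */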

/**
 * Lustre Network Driver types.
 */
enum {
	/* Only add to these values (i.e. don't ever change or redefine
	 * them): network addresses depend on them... */
	QSWLND		= 1,
	SOCKLND		= 2,
	GMLND		= 3,	/* obsolete, kept so libcfs_nid2str() works */
	PTLLND		= 4,
	O2IBLND		= 5,
	CIBLND		= 6,
	OPENIBLND	= 7,
	IIBLND		= 8,
	LOLND		= 9,
	RALND		= 10,
	VIBLND		= 11,
	MXLND		= 12,
	GNILND		= 13,
};

#endif /* __LIBCFS_PRIVATE_H__ */