/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * A range lock allows multiple threads to write to a single shared
 * file concurrently, provided each thread writes to a non-overlapping
 * portion of the file.
 *
 * Refer to the range lock implementation proposed for the upstream
 * kernel by Jan Kara <jack@suse.cz>: https://lkml.org/lkml/2013/1/31/480
 *
 * This file could later be replaced by the upstream kernel version.
 */
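/*
 * For example, writers holding the byte ranges [0, 99] and [100, 199]
 * may proceed concurrently, while a third writer requesting [50, 149]
 * overlaps both and must wait until they are released.
 */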
/*
 * Author: Prakash Surya <surya1@llnl.gov>
 * Author: Bobi Jam <bobijam.xu@intel.com>
 */
#ifndef _RANGE_LOCK_H
#define _RANGE_LOCK_H

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/interval_tree.h"

struct range_lock {
	struct interval_node	rl_node;
	/**
	 * The process that enqueued this lock.
	 */
	struct task_struct	*rl_task;
	/**
	 * List of locks with the same range.
	 */
	struct list_head	rl_next_lock;
	/**
	 * Number of locks in the rl_next_lock list.
	 */
	unsigned int		rl_lock_count;
	/**
	 * Number of ranges which are blocking acquisition of the lock.
	 */
	unsigned int		rl_blocking_ranges;
	/**
	 * Sequence number of the range lock. It records the order in
	 * which locks were queued; this is required for range_cancel().
	 */
	__u64			rl_sequence;
};

/* Map the embedded interval tree node back to its enclosing range_lock. */
static inline struct range_lock *node2rangelock(const struct interval_node *n)
{
	return container_of(n, struct range_lock, rl_node);
}
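/*
 * A sketch of how node2rangelock() is typically used: a callback passed
 * to interval_search() (from interval_tree.h) converts each visited
 * node back to its enclosing lock. The callback below is hypothetical;
 * only interval_search() and INTERVAL_ITER_CONT are taken from that
 * header:
 *
 *	static enum interval_iter example_cb(struct interval_node *node,
 *					     void *arg)
 *	{
 *		struct range_lock *overlap = node2rangelock(node);
 *
 *		(e.g. count or wait on the overlapping lock here)
 *		return INTERVAL_ITER_CONT;
 *	}
 */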

struct range_lock_tree {
	struct interval_node	*rlt_root;
	spinlock_t		 rlt_lock;	/* protect range lock tree */
	__u64			 rlt_sequence;
};

void range_lock_tree_init(struct range_lock_tree *tree);
void range_lock_init(struct range_lock *lock, __u64 start, __u64 end);
int range_lock(struct range_lock_tree *tree, struct range_lock *lock);
void range_unlock(struct range_lock_tree *tree, struct range_lock *lock);
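/*
 * A minimal usage sketch (illustrative only): a writer locks the byte
 * range it intends to modify, performs its I/O, then drops the lock.
 * do_write() is a hypothetical stand-in for the caller's I/O, and a
 * zero return from range_lock() is assumed to mean success:
 *
 *	struct range_lock_tree tree;
 *	struct range_lock lock;
 *
 *	range_lock_tree_init(&tree);
 *	range_lock_init(&lock, 0, 4095);
 *	if (range_lock(&tree, &lock) == 0) {
 *		do_write();
 *		range_unlock(&tree, &lock);
 *	}
 */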
#endif /* _RANGE_LOCK_H */