/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_extent.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */

/**
 * This file contains the implementation of the EXTENT lock type.
 *
 * The EXTENT lock type is for locking a contiguous range of values,
 * represented by 64-bit starting and ending offsets (inclusive). There are
 * several extent lock modes, some of which may be mutually incompatible.
 * Extent locks are considered incompatible if their modes are incompatible
 * and their extents intersect. See the lock mode compatibility matrix in
 * lustre_dlm.h.
 */
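
/*
 * A quick sketch of that rule: PW (protected write) conflicts with PW by
 * mode, so PW locks on [0, 4095] and [4096, 8191] coexist only because
 * their extents do not intersect, while PW locks on [0, 4096] and
 * [4096, 8191] conflict, since both cover offset 4096.
 */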

#define DEBUG_SUBSYSTEM S_LDLM
#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_dlm.h"
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/obd_class.h"
#include "../include/lustre_lib.h"
#include "ldlm_internal.h"

/* When a lock is cancelled by a client, the KMS may undergo change if this
 * is the "highest lock". This function returns the new KMS value.
 * Caller must hold lr_lock already.
 *
 * NB: A lock on [x, y] protects a KMS of up to y + 1 bytes!
 */
__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
{
	struct ldlm_resource *res = lock->l_resource;
	struct list_head *tmp;
	struct ldlm_lock *lck;
	__u64 kms = 0;

	/* Don't let another thread in ldlm_extent_shift_kms race with us
	 * just after we finish and include our lock in its calculation of
	 * the KMS.
	 */
	ldlm_set_kms_ignore(lock);

	list_for_each(tmp, &res->lr_granted) {
		lck = list_entry(tmp, struct ldlm_lock, l_res_link);

		if (ldlm_is_kms_ignore(lck))
			continue;

		if (lck->l_policy_data.l_extent.end >= old_kms)
			return old_kms;

		/* This extent _has_ to be smaller than old_kms (checked
		 * above), so kms can only ever be smaller than or equal to
		 * old_kms.
		 */
		if (lck->l_policy_data.l_extent.end + 1 > kms)
			kms = lck->l_policy_data.l_extent.end + 1;
	}
	LASSERTF(kms <= old_kms, "kms %llu old_kms %llu\n", kms, old_kms);

	return kms;
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
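
/*
 * Worked example: with granted extents [0, 4095] and [0, 8191] and
 * old_kms == 8192, cancelling the [0, 8191] lock leaves [0, 4095] as the
 * highest remaining extent, so the new KMS is 4095 + 1 == 4096;
 * cancelling [0, 4095] instead yields 8191 + 1 == 8192, leaving the KMS
 * unchanged.
 */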

struct kmem_cache *ldlm_interval_slab;

/* interval tree, for LDLM_EXTENT. */
static void ldlm_interval_attach(struct ldlm_interval *n, struct ldlm_lock *l)
{
	LASSERT(!l->l_tree_node);
	LASSERT(l->l_resource->lr_type == LDLM_EXTENT);

	list_add_tail(&l->l_sl_policy, &n->li_group);
	l->l_tree_node = n;
}

struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
	struct ldlm_interval *node;

	LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
	node = kmem_cache_zalloc(ldlm_interval_slab, GFP_NOFS);
	if (!node)
		return NULL;

	INIT_LIST_HEAD(&node->li_group);
	ldlm_interval_attach(node, lock);
	return node;
}

void ldlm_interval_free(struct ldlm_interval *node)
{
	if (node) {
		LASSERT(list_empty(&node->li_group));
		LASSERT(!interval_is_intree(&node->li_node));
		kmem_cache_free(ldlm_interval_slab, node);
	}
}

struct ldlm_interval *ldlm_interval_detach(struct ldlm_lock *l)
{
	struct ldlm_interval *n = l->l_tree_node;

	if (!n)
		return NULL;

	LASSERT(!list_empty(&n->li_group));
	l->l_tree_node = NULL;
	list_del_init(&l->l_sl_policy);

	return list_empty(&n->li_group) ? n : NULL;
}
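
/* Note that ldlm_interval_detach() returns the interval node only when the
 * detached lock was the last member of its policy group; the caller is then
 * responsible for freeing the node (see ldlm_extent_add_lock() and
 * ldlm_extent_unlink_lock() below).
 */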

static inline int lock_mode_to_index(enum ldlm_mode mode)
{
	int index;

	LASSERT(mode != 0);
	LASSERT(is_power_of_2(mode));
	for (index = -1; mode; index++)
		mode >>= 1;
	LASSERT(index < LCK_MODE_NUM);
	return index;
}
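
/* For example, lock_mode_to_index() maps a mode bit of 1 to index 0, 2 to
 * 1, 4 to 2, and so on: each lock mode is a distinct power of two, and its
 * log2 selects the per-mode interval tree in res->lr_itree[].
 */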

/** Add newly granted lock into interval tree for the resource. */
void ldlm_extent_add_lock(struct ldlm_resource *res,
			  struct ldlm_lock *lock)
{
	struct interval_node *found, **root;
	struct ldlm_interval *node;
	struct ldlm_extent *extent;
	int idx;

	LASSERT(lock->l_granted_mode == lock->l_req_mode);

	node = lock->l_tree_node;
	LASSERT(node);
	LASSERT(!interval_is_intree(&node->li_node));

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	LASSERT(lock->l_granted_mode == res->lr_itree[idx].lit_mode);

	/* initialize the node's extent */
	extent = &lock->l_policy_data.l_extent;
	interval_set(&node->li_node, extent->start, extent->end);

	root = &res->lr_itree[idx].lit_root;
	found = interval_insert(&node->li_node, root);
	if (found) { /* The policy group was found. */
		struct ldlm_interval *tmp;

		tmp = ldlm_interval_detach(lock);
		ldlm_interval_free(tmp);
		ldlm_interval_attach(to_ldlm_interval(found), lock);
	}
	res->lr_itree[idx].lit_size++;

	/* Even though we use an interval tree to manage the extent locks,
	 * we also add the locks to the granted list, for debugging purposes.
	 */
	ldlm_resource_add_lock(res, &res->lr_granted, lock);
}
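
/* For instance, granting a second lock of the same mode on exactly
 * [0, 4095] when one is already in the tree takes the "found" branch
 * above: the new lock's interval node is freed and the lock joins the
 * existing node's li_group policy list instead of inserting a duplicate
 * interval.
 */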

/** Remove cancelled lock from resource interval tree. */
void ldlm_extent_unlink_lock(struct ldlm_lock *lock)
{
	struct ldlm_resource *res = lock->l_resource;
	struct ldlm_interval *node = lock->l_tree_node;
	struct ldlm_interval_tree *tree;
	int idx;

	if (!node || !interval_is_intree(&node->li_node)) /* duplicate unlink */
		return;

	idx = lock_mode_to_index(lock->l_granted_mode);
	LASSERT(lock->l_granted_mode == 1 << idx);
	tree = &res->lr_itree[idx];

	LASSERT(tree->lit_root); /* assert that the tree is not empty */

	tree->lit_size--;
	node = ldlm_interval_detach(lock);
	if (node) {
		interval_erase(&node->li_node, &tree->lit_root);
		ldlm_interval_free(node);
	}
}

void ldlm_extent_policy_wire_to_local(const ldlm_wire_policy_data_t *wpolicy,
				      ldlm_policy_data_t *lpolicy)
{
	memset(lpolicy, 0, sizeof(*lpolicy));
	lpolicy->l_extent.start = wpolicy->l_extent.start;
	lpolicy->l_extent.end = wpolicy->l_extent.end;
	lpolicy->l_extent.gid = wpolicy->l_extent.gid;
}

void ldlm_extent_policy_local_to_wire(const ldlm_policy_data_t *lpolicy,
				      ldlm_wire_policy_data_t *wpolicy)
{
	memset(wpolicy, 0, sizeof(*wpolicy));
	wpolicy->l_extent.start = lpolicy->l_extent.start;
	wpolicy->l_extent.end = lpolicy->l_extent.end;
	wpolicy->l_extent.gid = lpolicy->l_extent.gid;
}
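
/* Note: the two converters above are symmetric; converting a local policy
 * to wire format and back preserves start, end and gid, since each copies
 * exactly those three l_extent fields after zeroing the destination.
 */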
239}