/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>

#include "dm-stats.h"

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)

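/*
 * Illustrative sketch, not part of the original header: the suspend flags
 * above are OR'd into the suspend_flags argument of dm_suspend(), declared
 * in <linux/device-mapper.h>.  The helper name below is hypothetical and
 * only shows the intended usage.
 */
static inline int dm_example_suspend_lockfs_noflush(struct mapped_device *md)
{
	return dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG | DM_SUSPEND_NOFLUSH_FLAG);
}
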
/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	atomic_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_destroy(struct dm_table *t);
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			       struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
int dm_table_any_congested(struct dm_table *t, int bdi_bits);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
bool dm_table_all_blk_mq_devices(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);

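/*
 * Illustrative sketch, not part of the original header: entries of
 * struct dm_dev_internal are chained on the list returned by
 * dm_table_get_devices() and can be walked with list_for_each_entry().
 * The helper name below is hypothetical.
 */
static inline unsigned int dm_example_count_table_devices(struct dm_table *t)
{
	struct dm_dev_internal *dd;
	unsigned int n = 0;

	list_for_each_entry(dd, dm_table_get_devices(t), list)
		n++;

	return n;
}
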
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check the return value from dm_table_find_target().
 */
#define dm_target_is_valid(t) ((t)->table)

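/*
 * Illustrative sketch, not part of the original header: for a sector
 * beyond the end of the table, dm_table_find_target() may return a
 * placeholder entry whose ->table back-pointer is unset, so callers
 * check dm_target_is_valid() before using the result.  The helper name
 * below is hypothetical.
 */
static inline struct dm_target *dm_example_lookup_target(struct dm_table *t,
							 sector_t sector)
{
	struct dm_target *ti = dm_table_find_target(t, sector);

	return dm_target_is_valid(ti) ? ti : NULL;
}
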
/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))

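/*
 * Illustrative sketch, not part of the original header: the three macros
 * above classify a target purely by which map hooks its target_type
 * provides.  The helper name below is hypothetical.
 */
static inline const char *dm_example_target_kind(struct dm_target *ti)
{
	if (dm_target_hybrid(ti))
		return "hybrid";
	if (dm_target_request_based(ti))
		return "request-based";
	return dm_target_bio_based(ti) ? "bio-based" : "unknown";
}
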
/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);

int dm_split_args(int *argc, char ***argvp, char *input);

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

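/*
 * Illustrative sketch, not part of the original header: the internal
 * suspend/resume calls are intended to be used as a matched pair to
 * quiesce the device around an in-kernel operation, without going
 * through the ioctl interface.  The helper name below is hypothetical.
 */
static inline void dm_example_with_internal_suspend(struct mapped_device *md,
						    void (*fn)(struct mapped_device *))
{
	dm_internal_suspend_fast(md);
	fn(md);
	dm_internal_resume_fast(md);
}
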
/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject helper
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
sector_t dm_get_size(struct mapped_device *md);
struct request_queue *dm_get_md_queue(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
struct dm_stats *dm_get_stats(struct mapped_device *md);

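/*
 * Illustrative sketch, not part of the original header:
 * dm_get_table_device() and dm_put_table_device() form an open/close
 * pair on a dm_dev reference.  The helper name below is hypothetical.
 */
static inline int dm_example_probe_dev(struct mapped_device *md, dev_t dev)
{
	struct dm_dev *d;
	int r = dm_get_table_device(md, dev, FMODE_READ, &d);

	if (r)
		return r;
	/* ... inspect d->bdev here ... */
	dm_put_table_device(md, d);
	return 0;
}
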
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned integrity, unsigned per_bio_data_size);
void dm_free_md_mempools(struct dm_md_mempools *pools);

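/*
 * Illustrative sketch, not part of the original header: mempools are
 * allocated for the queue type the device will use and released with
 * dm_free_md_mempools().  The helper name below is hypothetical; the
 * DM_TYPE_BIO_BASED value comes from <linux/device-mapper.h>.
 */
static inline struct dm_md_mempools *
dm_example_alloc_bio_based_pools(struct mapped_device *md)
{
	/* No integrity profile and no per-bio data in this sketch. */
	return dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED, 0, 0);
}
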
/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif