/*
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

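/*
 * Attribute policy for the RDMA_NL_NLDEV netlink family; nlmsg_parse()
 * validates incoming requests against these types and length limits.
 */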
static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
	[RDMA_NLDEV_ATTR_DEV_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_DEV_NAME]	= { .type = NLA_NUL_STRING,
					    .len = IB_DEVICE_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_PORT_INDEX]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_FW_VERSION]	= { .type = NLA_NUL_STRING,
					    .len = IB_FW_VERSION_NAME_MAX - 1},
	[RDMA_NLDEV_ATTR_NODE_GUID]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SYS_IMAGE_GUID] = { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_SUBNET_PREFIX]	= { .type = NLA_U64 },
	[RDMA_NLDEV_ATTR_LID]		= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_SM_LID]	= { .type = NLA_U32 },
	[RDMA_NLDEV_ATTR_LMC]		= { .type = NLA_U8 },
};

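/*
 * Fill the device-wide attributes: index, name, highest port number,
 * capability flags, firmware version (when available) and GUIDs.
 */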
static int fill_dev_info(struct sk_buff *msg, struct ib_device *device)
{
	char fw[IB_FW_VERSION_NAME_MAX];

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, rdma_end_port(device)))
		return -EMSGSIZE;

	BUILD_BUG_ON(sizeof(device->attrs.device_cap_flags) != sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      device->attrs.device_cap_flags, 0))
		return -EMSGSIZE;

	ib_get_device_fw_str(device, fw);
	/* Device without FW has strlen(fw) = 0 */
	if (strlen(fw) && nla_put_string(msg, RDMA_NLDEV_ATTR_FW_VERSION, fw))
		return -EMSGSIZE;

	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_NODE_GUID,
			      be64_to_cpu(device->node_guid), 0))
		return -EMSGSIZE;
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SYS_IMAGE_GUID,
			      be64_to_cpu(device->attrs.sys_image_guid), 0))
		return -EMSGSIZE;
	return 0;
}

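/*
 * Fill the per-port attributes. The IB-specific attributes (subnet
 * prefix, LID, SM LID and LMC) are reported only for IB ports.
 */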
static int fill_port_info(struct sk_buff *msg,
			  struct ib_device *device, u32 port)
{
	struct ib_port_attr attr;
	int ret;

	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_DEV_INDEX, device->index))
		return -EMSGSIZE;
	if (nla_put_string(msg, RDMA_NLDEV_ATTR_DEV_NAME, device->name))
		return -EMSGSIZE;
	if (nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
		return -EMSGSIZE;

	ret = ib_query_port(device, port, &attr);
	if (ret)
		return ret;

	BUILD_BUG_ON(sizeof(attr.port_cap_flags) > sizeof(u64));
	if (nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_CAP_FLAGS,
			      (u64)attr.port_cap_flags, 0))
		return -EMSGSIZE;
	if (rdma_protocol_ib(device, port) &&
	    nla_put_u64_64bit(msg, RDMA_NLDEV_ATTR_SUBNET_PREFIX,
			      attr.subnet_prefix, 0))
		return -EMSGSIZE;
	if (rdma_protocol_ib(device, port)) {
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_LID, attr.lid))
			return -EMSGSIZE;
		if (nla_put_u32(msg, RDMA_NLDEV_ATTR_SM_LID, attr.sm_lid))
			return -EMSGSIZE;
		if (nla_put_u8(msg, RDMA_NLDEV_ATTR_LMC, attr.lmc))
			return -EMSGSIZE;
	}
	return 0;
}

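/*
 * Handle RDMA_NLDEV_CMD_GET for a single device, selected by the
 * RDMA_NLDEV_ATTR_DEV_INDEX attribute, and reply with a unicast message.
 */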
static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

	device = __ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_dev_info(msg, device);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	nlmsg_end(msg, nlh);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}

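/*
 * Per-device callback for the RDMA_NLDEV_CMD_GET dump. Devices whose
 * index is below cb->args[0] were already dumped and are skipped.
 */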
static int _nldev_get_dumpit(struct ib_device *device,
			     struct sk_buff *skb,
			     struct netlink_callback *cb,
			     unsigned int idx)
{
	int start = cb->args[0];
	struct nlmsghdr *nlh;

	if (idx < start)
		return 0;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, NLM_F_MULTI);

	if (fill_dev_info(skb, device)) {
		nlmsg_cancel(skb, nlh);
		goto out;
	}

	nlmsg_end(skb, nlh);

	idx++;

out:	cb->args[0] = idx;
	return skb->len;
}

static int nldev_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/*
	 * There is no need to take a lock here, because we rely on
	 * ib_core's lists_rwsem.
	 */
	return ib_enum_all_devs(_nldev_get_dumpit, skb, cb);
}

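/*
 * Handle RDMA_NLDEV_CMD_PORT_GET for a single port, selected by the
 * RDMA_NLDEV_ATTR_DEV_INDEX and RDMA_NLDEV_ATTR_PORT_INDEX attributes.
 */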
static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct netlink_ext_ack *extack)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	struct sk_buff *msg;
	u32 index;
	u32 port;
	int err;

	err = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, extack);
	if (err || !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
		return -EINVAL;

	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = __ib_device_get_by_index(index);
	if (!device)
		return -EINVAL;

	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
	if (!rdma_is_port_valid(device, port))
		return -EINVAL;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
			0, 0);

	err = fill_port_info(msg, device, port);
	if (err) {
		nlmsg_free(msg);
		return err;
	}

	nlmsg_end(msg, nlh);

	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
}

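/*
 * Dump all ports of the device selected by RDMA_NLDEV_ATTR_DEV_INDEX,
 * one NLM_F_MULTI message per port.
 */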
static int nldev_port_get_dumpit(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
	struct ib_device *device;
	int start = cb->args[0];
	struct nlmsghdr *nlh;
	u32 idx = 0;
	u32 ifindex;
	int err;
	u32 p;

	err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
			  nldev_policy, NULL);
	if (err || !tb[RDMA_NLDEV_ATTR_DEV_INDEX])
		return -EINVAL;

	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
	device = __ib_device_get_by_index(ifindex);
	if (!device)
		return -EINVAL;

	for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
		/*
		 * The dumpit function returns all information from the
		 * specified index onwards. This index is taken from the
		 * netlink request sent by the user and is available in
		 * cb->args[0].
		 *
		 * Usually, the user doesn't fill this field, so all
		 * ports are returned.
		 */
		if (idx < start) {
			idx++;
			continue;
		}

		nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq,
				RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
						 RDMA_NLDEV_CMD_PORT_GET),
				0, NLM_F_MULTI);

		if (fill_port_info(skb, device, p)) {
			nlmsg_cancel(skb, nlh);
			goto out;
		}
		idx++;
		nlmsg_end(skb, nlh);
	}

out:	cb->args[0] = idx;
	return skb->len;
}

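/* Mapping from nldev commands to their doit/dump handlers. */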
static const struct rdma_nl_cbs nldev_cb_table[] = {
	[RDMA_NLDEV_CMD_GET] = {
		.doit = nldev_get_doit,
		.dump = nldev_get_dumpit,
	},
	[RDMA_NLDEV_CMD_PORT_GET] = {
		.doit = nldev_port_get_doit,
		.dump = nldev_port_get_dumpit,
	},
};

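/*
 * nldev_init()/nldev_exit() register and unregister the RDMA_NL_NLDEV
 * client with the RDMA netlink core.
 */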
void __init nldev_init(void)
{
	rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table);
}

void __exit nldev_exit(void)
{
	rdma_nl_unregister(RDMA_NL_NLDEV);
}