blob: a9d7be4b964f1bdb0838f14bf5aa1590bbbfb899 [file] [log] [blame]
/*
 * uio_hv_generic - generic UIO driver for VMBus
 *
 * Copyright (c) 2013-2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2016, Microsoft Corporation.
 *
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Since the driver does not declare any device ids, you must allocate
 * id and bind the device to the driver yourself.  For example:
 *
 * Associate Network GUID with UIO device
 * # echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" \
 *    > /sys/bus/vmbus/drivers/uio_hv_generic/new_id
 * Then rebind
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *    > /sys/bus/vmbus/drivers/hv_netvsc/unbind
 * # echo -n "ed963694-e847-4b2a-85af-bc9cfc11d6f3" \
 *    > /sys/bus/vmbus/drivers/uio_hv_generic/bind
 */
22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
25#include <linux/device.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/uio_driver.h>
29#include <linux/netdevice.h>
30#include <linux/if_ether.h>
31#include <linux/skbuff.h>
32#include <linux/hyperv.h>
33#include <linux/vmalloc.h>
34#include <linux/slab.h>
35
36#include "../hv/hyperv_vmbus.h"
37
38#define DRIVER_VERSION "0.02.0"
39#define DRIVER_AUTHOR "Stephen Hemminger <sthemmin at microsoft.com>"
40#define DRIVER_DESC "Generic UIO driver for VMBus devices"
41
Stephen Hemmingere7d21462018-01-09 12:57:30 -080042#define HV_RING_SIZE 512 /* pages */
43#define SEND_BUFFER_SIZE (15 * 1024 * 1024)
44#define RECV_BUFFER_SIZE (15 * 1024 * 1024)
45
/*
 * List of resources to be mapped to user space
 * can be extended up to MAX_UIO_MAPS(5) items
 */
enum hv_uio_map {
	TXRX_RING_MAP = 0,	/* primary channel ring buffer */
	INT_PAGE_MAP,		/* VMBus interrupt page */
	MON_PAGE_MAP,		/* VMBus monitor page */
	RECV_BUF_MAP,		/* receive buffer (vzalloc'd, GPADL-mapped) */
	SEND_BUF_MAP		/* send buffer (vzalloc'd, GPADL-mapped) */
};
57
/* Per-device driver state; stored as the hv_device's driver data. */
struct hv_uio_private_data {
	struct uio_info info;		/* registered with the UIO core */
	struct hv_device *device;	/* owning VMBus device */

	void	*recv_buf;		/* host-visible receive area */
	u32	recv_gpadl;		/* GPADL handle for recv_buf (0 = none) */
	char	recv_name[32];		/* "recv_4294967295" */

	void	*send_buf;		/* host-visible send area */
	u32	send_gpadl;		/* GPADL handle for send_buf (0 = none) */
	char	send_name[32];		/* "send:<gpadl>" map name */
};
70
Stephen Hemminger95096f22016-12-03 12:34:40 -080071/*
72 * This is the irqcontrol callback to be registered to uio_info.
73 * It can be used to disable/enable interrupt from user space processes.
74 *
75 * @param info
76 * pointer to uio_info.
77 * @param irq_state
78 * state value. 1 to enable interrupt, 0 to disable interrupt.
79 */
80static int
81hv_uio_irqcontrol(struct uio_info *info, s32 irq_state)
82{
83 struct hv_uio_private_data *pdata = info->priv;
84 struct hv_device *dev = pdata->device;
85
86 dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
87 virt_mb();
88
89 return 0;
90}
91
92/*
93 * Callback from vmbus_event when something is in inbound ring.
94 */
95static void hv_uio_channel_cb(void *context)
96{
Stephen Hemminger135db382018-04-16 11:19:26 -070097 struct vmbus_channel *chan = context;
98 struct hv_device *hv_dev = chan->device_obj;
99 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
Stephen Hemminger95096f22016-12-03 12:34:40 -0800100
Stephen Hemminger135db382018-04-16 11:19:26 -0700101 chan->inbound.ring_buffer->interrupt_mask = 1;
Stephen Hemminger95096f22016-12-03 12:34:40 -0800102 virt_mb();
103
104 uio_event_notify(&pdata->info);
105}
106
Stephen Hemmingerca3cda62018-01-09 12:57:32 -0800107/*
108 * Callback from vmbus_event when channel is rescinded.
109 */
110static void hv_uio_rescind(struct vmbus_channel *channel)
111{
112 struct hv_device *hv_dev = channel->primary_channel->device_obj;
113 struct hv_uio_private_data *pdata = hv_get_drvdata(hv_dev);
114
115 /*
116 * Turn off the interrupt file handle
117 * Next read for event will return -EIO
118 */
119 pdata->info.irq = 0;
120
121 /* Wake up reader */
122 uio_event_notify(&pdata->info);
123}
Stephen Hemmingere7d21462018-01-09 12:57:30 -0800124
Stephen Hemminger37b96a42018-02-05 10:40:27 -0800125/*
126 * Handle fault when looking for sub channel ring buffer
127 * Subchannel ring buffer is same as resource 0 which is main ring buffer
128 * This is derived from uio_vma_fault
129 */
130static int hv_uio_vma_fault(struct vm_fault *vmf)
131{
132 struct vm_area_struct *vma = vmf->vma;
133 void *ring_buffer = vma->vm_private_data;
134 struct page *page;
135 void *addr;
136
137 addr = ring_buffer + (vmf->pgoff << PAGE_SHIFT);
138 page = virt_to_page(addr);
139 get_page(page);
140 vmf->page = page;
141 return 0;
142}
143
/* VM ops for ring-buffer mappings; only a fault handler is needed. */
static const struct vm_operations_struct hv_uio_vm_ops = {
	.fault = hv_uio_vma_fault,
};
147
148/* Sysfs API to allow mmap of the ring buffers */
149static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
150 struct bin_attribute *attr,
151 struct vm_area_struct *vma)
152{
153 struct vmbus_channel *channel
154 = container_of(kobj, struct vmbus_channel, kobj);
155 unsigned long requested_pages, actual_pages;
156
157 if (vma->vm_end < vma->vm_start)
158 return -EINVAL;
159
160 /* only allow 0 for now */
161 if (vma->vm_pgoff > 0)
162 return -EINVAL;
163
164 requested_pages = vma_pages(vma);
165 actual_pages = 2 * HV_RING_SIZE;
166 if (requested_pages > actual_pages)
167 return -EINVAL;
168
169 vma->vm_private_data = channel->ringbuffer_pages;
170 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
171 vma->vm_ops = &hv_uio_vm_ops;
172 return 0;
173}
174
/* Binary sysfs attribute ("ring") exposing a channel's ring buffer for mmap. */
static const struct bin_attribute ring_buffer_bin_attr = {
	.attr = {
		.name = "ring",
		.mode = 0600,	/* owner read/write only */
	},
	.size = 2 * HV_RING_SIZE * PAGE_SIZE,
	.mmap = hv_uio_ring_mmap,
};
183
/* Callback from VMBUS subsystem when new channel created. */
static void
hv_uio_new_channel(struct vmbus_channel *new_sc)
{
	struct hv_device *hv_dev = new_sc->primary_channel->device_obj;
	struct device *device = &hv_dev->device;
	const size_t ring_bytes = HV_RING_SIZE * PAGE_SIZE;
	int ret;

	/* Create host communication ring */
	ret = vmbus_open(new_sc, ring_bytes, ring_bytes, NULL, 0,
			 hv_uio_channel_cb, new_sc);
	if (ret) {
		dev_err(device, "vmbus_open subchannel failed: %d\n", ret);
		return;
	}

	/* Disable interrupts on sub channel */
	new_sc->inbound.ring_buffer->interrupt_mask = 1;
	set_channel_read_mode(new_sc, HV_CALL_ISR);

	/* Expose the sub-channel's ring via sysfs so user space can mmap it. */
	ret = sysfs_create_bin_file(&new_sc->kobj, &ring_buffer_bin_attr);
	if (ret) {
		dev_err(device, "sysfs create ring bin file failed; %d\n", ret);
		/* Without the sysfs file the channel is unusable; close it. */
		vmbus_close(new_sc);
	}
}
211
Stephen Hemmingere7d21462018-01-09 12:57:30 -0800212static void
213hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
214{
215 if (pdata->send_gpadl)
216 vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
217 vfree(pdata->send_buf);
218
219 if (pdata->recv_gpadl)
220 vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
221 vfree(pdata->recv_buf);
222}
223
/*
 * Probe a VMBus device bound to this driver: open its primary channel,
 * map the ring buffer, interrupt page, monitor page and the send/receive
 * buffers as UIO memory regions, and register the UIO device.
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on failure.
 */
static int
hv_uio_probe(struct hv_device *dev,
	     const struct hv_vmbus_device_id *dev_id)
{
	struct hv_uio_private_data *pdata;
	int ret;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	/* Open the primary channel; events are delivered to hv_uio_channel_cb. */
	ret = vmbus_open(dev->channel, HV_RING_SIZE * PAGE_SIZE,
			 HV_RING_SIZE * PAGE_SIZE, NULL, 0,
			 hv_uio_channel_cb, dev->channel);
	if (ret)
		goto fail;

	/* Communicating with host has to be via shared memory not hypercall */
	if (!dev->channel->offermsg.monitor_allocated) {
		dev_err(&dev->device, "vmbus channel requires hypercall\n");
		ret = -ENOTSUPP;
		goto fail_close;
	}

	/* Start with interrupts masked; user space unmasks via irqcontrol. */
	dev->channel->inbound.ring_buffer->interrupt_mask = 1;
	set_channel_read_mode(dev->channel, HV_CALL_ISR);

	/* Fill general uio info */
	pdata->info.name = "uio_hv_generic";
	pdata->info.version = DRIVER_VERSION;
	pdata->info.irqcontrol = hv_uio_irqcontrol;
	pdata->info.irq = UIO_IRQ_CUSTOM;

	/* mem resources */
	pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
	pdata->info.mem[TXRX_RING_MAP].addr
		= (uintptr_t)dev->channel->ringbuffer_pages;
	pdata->info.mem[TXRX_RING_MAP].size
		= dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
	pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[INT_PAGE_MAP].name = "int_page";
	pdata->info.mem[INT_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.int_page;
	pdata->info.mem[INT_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[INT_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	pdata->info.mem[MON_PAGE_MAP].name = "monitor_page";
	pdata->info.mem[MON_PAGE_MAP].addr
		= (uintptr_t)vmbus_connection.monitor_pages[1];
	pdata->info.mem[MON_PAGE_MAP].size = PAGE_SIZE;
	pdata->info.mem[MON_PAGE_MAP].memtype = UIO_MEM_LOGICAL;

	/* Receive buffer, shared with the host through a GPADL. */
	pdata->recv_buf = vzalloc(RECV_BUFFER_SIZE);
	if (pdata->recv_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(dev->channel, pdata->recv_buf,
				    RECV_BUFFER_SIZE, &pdata->recv_gpadl);
	if (ret)
		goto fail_close;

	/* put Global Physical Address Label in name */
	snprintf(pdata->recv_name, sizeof(pdata->recv_name),
		 "recv:%u", pdata->recv_gpadl);
	pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
	pdata->info.mem[RECV_BUF_MAP].addr
		= (uintptr_t)pdata->recv_buf;
	pdata->info.mem[RECV_BUF_MAP].size = RECV_BUFFER_SIZE;
	pdata->info.mem[RECV_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	/* Send buffer, likewise GPADL-mapped. */
	pdata->send_buf = vzalloc(SEND_BUFFER_SIZE);
	if (pdata->send_buf == NULL) {
		ret = -ENOMEM;
		goto fail_close;
	}

	ret = vmbus_establish_gpadl(dev->channel, pdata->send_buf,
				    SEND_BUFFER_SIZE, &pdata->send_gpadl);
	if (ret)
		goto fail_close;

	snprintf(pdata->send_name, sizeof(pdata->send_name),
		 "send:%u", pdata->send_gpadl);
	pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
	pdata->info.mem[SEND_BUF_MAP].addr
		= (uintptr_t)pdata->send_buf;
	pdata->info.mem[SEND_BUF_MAP].size = SEND_BUFFER_SIZE;
	pdata->info.mem[SEND_BUF_MAP].memtype = UIO_MEM_VIRTUAL;

	pdata->info.priv = pdata;
	pdata->device = dev;

	ret = uio_register_device(&dev->device, &pdata->info);
	if (ret) {
		dev_err(&dev->device, "hv_uio register failed\n");
		goto fail_close;
	}

	/* Hook rescind and sub-channel creation notifications. */
	vmbus_set_chn_rescind_callback(dev->channel, hv_uio_rescind);
	vmbus_set_sc_create_callback(dev->channel, hv_uio_new_channel);

	/* Best effort: expose the primary ring via sysfs for mmap. */
	ret = sysfs_create_bin_file(&dev->channel->kobj, &ring_buffer_bin_attr);
	if (ret)
		dev_notice(&dev->device,
			   "sysfs create ring bin file failed; %d\n", ret);

	hv_set_drvdata(dev, pdata);

	return 0;

fail_close:
	hv_uio_cleanup(dev, pdata);
	vmbus_close(dev->channel);
fail:
	kfree(pdata);

	return ret;
}
346
/* Device removal: undo everything done in hv_uio_probe(). */
static int
hv_uio_remove(struct hv_device *dev)
{
	struct hv_uio_private_data *pdata = hv_get_drvdata(dev);

	/* Nothing to do if probe never completed (drvdata is set last). */
	if (!pdata)
		return 0;

	uio_unregister_device(&pdata->info);
	hv_uio_cleanup(dev, pdata);
	hv_set_drvdata(dev, NULL);
	vmbus_close(dev->channel);
	kfree(pdata);
	return 0;
}
362
/* VMBus driver; device ids are bound dynamically via sysfs new_id. */
static struct hv_driver hv_uio_drv = {
	.name = "uio_hv_generic",
	.id_table = NULL, /* only dynamic id's */
	.probe = hv_uio_probe,
	.remove = hv_uio_remove,
};
369
/* Register the driver with the VMBus core on module load. */
static int __init
hyperv_module_init(void)
{
	return vmbus_driver_register(&hv_uio_drv);
}

/* Unregister the driver on module unload. */
static void __exit
hyperv_module_exit(void)
{
	vmbus_driver_unregister(&hv_uio_drv);
}

module_init(hyperv_module_init);
module_exit(hyperv_module_exit);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);