// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * Emulate enough of qemu stdvga to make bochs-drm.ko happy.  That is
 * basically the vram memory bar and the bochs dispi interface vbe
 * registers in the mmio register bar.  Specifically it does *not*
 * include any legacy vga stuff.  Device looks a lot like "qemu -device
 * secondary-vga".
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
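/*
 * Example usage (illustrative, not from the original source; the exact
 * sysfs paths depend on how the parent device is registered):
 *
 *   UUID=$(uuidgen)
 *   echo $UUID > \
 *     /sys/devices/virtual/mbochs/mbochs/mdev_supported_types/mbochs-small/create
 *
 * The resulting mediated device can then be handed to qemu, e.g. with
 * "-device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID".
 */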
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/highmem.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
#include <drm/drm_modeset_lock.h>
#include <drm/drm_property.h>
#include <drm/drm_plane.h>


#define VBE_DISPI_INDEX_ID		0x0
#define VBE_DISPI_INDEX_XRES		0x1
#define VBE_DISPI_INDEX_YRES		0x2
#define VBE_DISPI_INDEX_BPP		0x3
#define VBE_DISPI_INDEX_ENABLE		0x4
#define VBE_DISPI_INDEX_BANK		0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH	0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT	0x7
#define VBE_DISPI_INDEX_X_OFFSET	0x8
#define VBE_DISPI_INDEX_Y_OFFSET	0x9
#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa
#define VBE_DISPI_INDEX_COUNT		0xb

#define VBE_DISPI_ID0			0xB0C0
#define VBE_DISPI_ID1			0xB0C1
#define VBE_DISPI_ID2			0xB0C2
#define VBE_DISPI_ID3			0xB0C3
#define VBE_DISPI_ID4			0xB0C4
#define VBE_DISPI_ID5			0xB0C5

#define VBE_DISPI_DISABLED		0x00
#define VBE_DISPI_ENABLED		0x01
#define VBE_DISPI_GETCAPS		0x02
#define VBE_DISPI_8BIT_DAC		0x20
#define VBE_DISPI_LFB_ENABLED		0x40
#define VBE_DISPI_NOCLEARMEM		0x80
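
/*
 * Typical guest mode set, sketched (a sequence like the one bochs-drm
 * issues; register names per the defines above):
 *   write XRES = 1024, YRES = 768, BPP = 32,
 *   then ENABLE = VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED.
 * mbochs only stores the register values; the framebuffer layout is
 * derived on demand in mbochs_check_framebuffer().
 */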


#define MBOCHS_NAME		 "mbochs"
#define MBOCHS_CLASS_NAME	 "mbochs"

#define MBOCHS_CONFIG_SPACE_SIZE 0xff
#define MBOCHS_MMIO_BAR_OFFSET	 PAGE_SIZE
#define MBOCHS_MMIO_BAR_SIZE	 PAGE_SIZE
#define MBOCHS_MEMORY_BAR_OFFSET (MBOCHS_MMIO_BAR_OFFSET + \
				  MBOCHS_MMIO_BAR_SIZE)
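
/*
 * Layout of the mdev device file, as dispatched by mdev_access() below:
 *   [0x000000   .. 0x0000ff]	PCI config space
 *   [PAGE_SIZE   .. +PAGE_SIZE]	MMIO BAR (bochs dispi registers)
 *   [2*PAGE_SIZE .. +memsize]	memory BAR (vram), mmap-able
 */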

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");

static int max_mbytes = 256;
module_param_named(count, max_mbytes, int, 0444);
MODULE_PARM_DESC(count, "megabytes available to " MBOCHS_NAME " devices");


#define MBOCHS_TYPE_1 "small"
#define MBOCHS_TYPE_2 "medium"
#define MBOCHS_TYPE_3 "large"

static const struct mbochs_type {
	const char *name;
	u32 mbytes;
} mbochs_types[] = {
	{
		.name	= MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_1,
		.mbytes = 4,
	}, {
		.name	= MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_2,
		.mbytes = 16,
	}, {
		.name	= MBOCHS_CLASS_NAME "-" MBOCHS_TYPE_3,
		.mbytes = 64,
	},
};


static dev_t mbochs_devt;
static struct class *mbochs_class;
static struct cdev mbochs_cdev;
static struct device mbochs_dev;
static int mbochs_used_mbytes;

struct mbochs_mode {
	u32 drm_format;
	u32 bytepp;
	u32 width;
	u32 height;
	u32 stride;
	u32 __pad;
	u64 offset;
	u64 size;
};

struct mbochs_dmabuf {
	struct mbochs_mode mode;
	u32 id;
	struct page **pages;
	pgoff_t pagecount;
	struct dma_buf *buf;
	struct mdev_state *mdev_state;
	struct list_head next;
	bool unlinked;
};

/* State of each mdev device */
struct mdev_state {
	u8 *vconfig;
	u64 bar_mask[3];
	u32 memory_bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mbochs_type *type;
	u16 vbe[VBE_DISPI_INDEX_COUNT];
	u64 memsize;
	struct page **pages;
	pgoff_t pagecount;

	struct list_head dmabufs;
	u32 active_id;
	u32 next_id;
};

static const char *vbe_name_list[VBE_DISPI_INDEX_COUNT] = {
	[VBE_DISPI_INDEX_ID]               = "id",
	[VBE_DISPI_INDEX_XRES]             = "xres",
	[VBE_DISPI_INDEX_YRES]             = "yres",
	[VBE_DISPI_INDEX_BPP]              = "bpp",
	[VBE_DISPI_INDEX_ENABLE]           = "enable",
	[VBE_DISPI_INDEX_BANK]             = "bank",
	[VBE_DISPI_INDEX_VIRT_WIDTH]       = "virt-width",
	[VBE_DISPI_INDEX_VIRT_HEIGHT]      = "virt-height",
	[VBE_DISPI_INDEX_X_OFFSET]         = "x-offset",
	[VBE_DISPI_INDEX_Y_OFFSET]         = "y-offset",
	[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = "video-mem",
};

static const char *vbe_name(u32 index)
{
	if (index < ARRAY_SIZE(vbe_name_list))
		return vbe_name_list[index];
	return "(invalid)";
}

static struct page *mbochs_get_page(struct mdev_state *mdev_state,
				    pgoff_t pgoff);

static const struct mbochs_type *mbochs_find_type(struct kobject *kobj)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mbochs_types); i++)
		if (strcmp(mbochs_types[i].name, kobj->name) == 0)
			return mbochs_types + i;
	return NULL;
}

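/*
 * Worked example (illustrative): the "medium" type has memsize =
 * 16 MB = 0x01000000, so bar_mask[0] = ~0x01000000 + 1 = 0xff000000,
 * the all-ones-above-size mask the guest reads back when sizing BAR0.
 */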
static void mbochs_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   0x1234);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   0x1111);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   PCI_SUBVENDOR_ID_REDHAT_QUMRANET);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   PCI_SUBDEVICE_ID_QEMU);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32);
	mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
}

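/*
 * Worked example (illustrative numbers): 1024x768 at 32 bpp with
 * virt-width <= 1024 gives stride = 1024 * 4 = 4096 bytes and
 * size = 4096 * 768 = 3 MB; x-offset and y-offset move the scanout
 * start within the (possibly larger) virtual framebuffer.
 */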
static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
				    struct mbochs_mode *mode)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u16 *vbe = mdev_state->vbe;
	u32 virt_width;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
		goto nofb;

	memset(mode, 0, sizeof(*mode));
	switch (vbe[VBE_DISPI_INDEX_BPP]) {
	case 32:
		mode->drm_format = DRM_FORMAT_XRGB8888;
		mode->bytepp = 4;
		break;
	default:
		dev_info_ratelimited(dev, "%s: bpp %d not supported\n",
				     __func__, vbe[VBE_DISPI_INDEX_BPP]);
		goto nofb;
	}

	mode->width  = vbe[VBE_DISPI_INDEX_XRES];
	mode->height = vbe[VBE_DISPI_INDEX_YRES];
	virt_width = vbe[VBE_DISPI_INDEX_VIRT_WIDTH];
	if (virt_width < mode->width)
		virt_width = mode->width;
	mode->stride = virt_width * mode->bytepp;
	mode->size   = (u64)mode->stride * mode->height;
	mode->offset = ((u64)vbe[VBE_DISPI_INDEX_X_OFFSET] * mode->bytepp +
			(u64)vbe[VBE_DISPI_INDEX_Y_OFFSET] * mode->stride);

	if (mode->width < 64 || mode->height < 64) {
		dev_info_ratelimited(dev, "%s: invalid resolution %dx%d\n",
				     __func__, mode->width, mode->height);
		goto nofb;
	}
	if (mode->offset + mode->size > mdev_state->memsize) {
		dev_info_ratelimited(dev, "%s: framebuffer memory overflow\n",
				     __func__);
		goto nofb;
	}

	return 0;

nofb:
	memset(mode, 0, sizeof(*mode));
	return -EINVAL;
}

static bool mbochs_modes_equal(struct mbochs_mode *mode1,
			       struct mbochs_mode *mode2)
{
	return memcmp(mode1, mode2, sizeof(struct mbochs_mode)) == 0;
}

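/*
 * PCI BAR sizing handshake: the guest writes 0xffffffff to a BAR and
 * reads back the size mask computed in mbochs_create_config_space(),
 * then restores the address.  Only BAR0 (vram) and BAR2 (mmio) are
 * decoded here.
 */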
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	int index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_2:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR #%d @ 0x%x\n",
					 index, cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

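/*
 * The bochs dispi registers are exposed in the MMIO BAR at offset
 * 0x500: register index n lives at byte offset 0x500 + 2 * n and is
 * accessed as a 16-bit value, mirroring qemu's mmio-mapped dispi
 * interface.
 */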
static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset,
			      char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	int index;
	u16 reg16;

	switch (offset) {
	case 0x400 ... 0x41f: /* vga ioports remapped */
		goto unhandled;
	case 0x500 ... 0x515: /* bochs dispi interface */
		if (count != 2)
			goto unhandled;
		index = (offset - 0x500) / 2;
		reg16 = *(u16 *)buf;
		if (index < ARRAY_SIZE(mdev_state->vbe))
			mdev_state->vbe[index] = reg16;
		dev_dbg(dev, "%s: vbe write %d = %d (%s)\n",
			__func__, index, reg16, vbe_name(index));
		break;
	case 0x600 ... 0x607: /* qemu extended regs */
		goto unhandled;
	default:
unhandled:
		dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
			__func__, offset, count);
		break;
	}
}

static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset,
			     char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u16 reg16 = 0;
	int index;

	switch (offset) {
	case 0x500 ... 0x515: /* bochs dispi interface */
		if (count != 2)
			goto unhandled;
		index = (offset - 0x500) / 2;
		if (index < ARRAY_SIZE(mdev_state->vbe))
			reg16 = mdev_state->vbe[index];
		dev_dbg(dev, "%s: vbe read %d = %d (%s)\n",
			__func__, index, reg16, vbe_name(index));
		*(u16 *)buf = reg16;
		break;
	default:
unhandled:
		dev_dbg(dev, "%s: @0x%03x, count %d (unhandled)\n",
			__func__, offset, count);
		memset(buf, 0, count);
		break;
	}
}

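/*
 * Dispatch one access according to the region layout above: PCI config
 * space first, then the MMIO BAR window, then the memory BAR, whose
 * pages are allocated lazily on first access.
 */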
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
			   loff_t pos, bool is_write)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);
	struct page *pg;
	loff_t poff;
	char *map;
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if (pos >= MBOCHS_MMIO_BAR_OFFSET &&
		   pos + count <= MBOCHS_MEMORY_BAR_OFFSET) {
		pos -= MBOCHS_MMIO_BAR_OFFSET;
		if (is_write)
			handle_mmio_write(mdev_state, pos, buf, count);
		else
			handle_mmio_read(mdev_state, pos, buf, count);

	} else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
		   pos + count <=
		   MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
		/* convert to an offset into the memory bar (vram) */
		pos -= MBOCHS_MEMORY_BAR_OFFSET;
		poff = pos & ~PAGE_MASK;
		pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
		if (!pg) {
			ret = -ENOMEM;
			goto accessfailed;
		}
		map = kmap(pg);
		if (is_write)
			memcpy(map + poff, buf, count);
		else
			memcpy(buf, map + poff, count);
		kunmap(pg);
		put_page(pg);

	} else {
		dev_dbg(dev, "%s: %s @0x%llx (unhandled)\n",
			__func__, is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mbochs_reset(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	u32 size64k = mdev_state->memsize / (64 * 1024);
	int i;

	for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
		mdev_state->vbe[i] = 0;
	mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
	mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
	return 0;
}

static int mbochs_create(struct kobject *kobj, struct mdev_device *mdev)
{
	const struct mbochs_type *type = mbochs_find_type(kobj);
	struct device *dev = mdev_dev(mdev);
	struct mdev_state *mdev_state;

	if (!type)
		type = &mbochs_types[0];
	if (type->mbytes + mbochs_used_mbytes > max_mbytes)
		return -ENOMEM;

	mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
	if (mdev_state == NULL)
		return -ENOMEM;

	mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (mdev_state->vconfig == NULL)
		goto err_mem;

	mdev_state->memsize = type->mbytes * 1024 * 1024;
	mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT;
	mdev_state->pages = kcalloc(mdev_state->pagecount,
				    sizeof(struct page *),
				    GFP_KERNEL);
	if (!mdev_state->pages)
		goto err_mem;

	dev_info(dev, "%s: %s, %d MB, %ld pages\n", __func__,
		 kobj->name, type->mbytes, mdev_state->pagecount);

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_set_drvdata(mdev, mdev_state);
	INIT_LIST_HEAD(&mdev_state->dmabufs);
	mdev_state->next_id = 1;

	mdev_state->type = type;
	mbochs_create_config_space(mdev_state);
	mbochs_reset(mdev);

	mbochs_used_mbytes += type->mbytes;
	return 0;

err_mem:
	kfree(mdev_state->vconfig);
	kfree(mdev_state);
	return -ENOMEM;
}

static int mbochs_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	mbochs_used_mbytes -= mdev_state->type->mbytes;
	mdev_set_drvdata(mdev, NULL);
	kfree(mdev_state->pages);
	kfree(mdev_state->vconfig);
	kfree(mdev_state);
	return 0;
}

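/*
 * read()/write() split the user buffer into naturally aligned 4-, 2-
 * and 1-byte accesses, so mmio registers are accessed with the width
 * the emulation expects (the dispi registers are 16 bit wide).
 */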
static ssize_t mbochs_read(struct mdev_device *mdev, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mbochs_write(struct mdev_device *mdev, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

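/*
 * Vram pages are allocated on first use rather than up front.  Lookups
 * return the page with an extra reference taken, so callers may use it
 * after dropping ops_lock and release it with put_page().
 */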
static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
				      pgoff_t pgoff)
{
	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	if (!mdev_state->pages[pgoff]) {
		mdev_state->pages[pgoff] =
			alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
		if (!mdev_state->pages[pgoff])
			return NULL;
	}

	get_page(mdev_state->pages[pgoff]);
	return mdev_state->pages[pgoff];
}

static struct page *mbochs_get_page(struct mdev_state *mdev_state,
				    pgoff_t pgoff)
{
	struct page *page;

	if (WARN_ON(pgoff >= mdev_state->pagecount))
		return NULL;

	mutex_lock(&mdev_state->ops_lock);
	page = __mbochs_get_page(mdev_state, pgoff);
	mutex_unlock(&mdev_state->ops_lock);

	return page;
}

static void mbochs_put_pages(struct mdev_state *mdev_state)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	int i, count = 0;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	for (i = 0; i < mdev_state->pagecount; i++) {
		if (!mdev_state->pages[i])
			continue;
		put_page(mdev_state->pages[i]);
		mdev_state->pages[i] = NULL;
		count++;
	}
	dev_dbg(dev, "%s: %d pages released\n", __func__, count);
}

static int mbochs_region_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mdev_state *mdev_state = vma->vm_private_data;
	pgoff_t page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= mdev_state->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = mbochs_get_page(mdev_state, page_offset);
	if (!vmf->page)
		return VM_FAULT_SIGBUS;

	return 0;
}

static const struct vm_operations_struct mbochs_region_vm_ops = {
	.fault = mbochs_region_vm_fault,
};

static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	if (vma->vm_pgoff != MBOCHS_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	vma->vm_ops = &mbochs_region_vm_ops;
	vma->vm_private_data = mdev_state;
	return 0;
}

static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct mbochs_dmabuf *dmabuf = vma->vm_private_data;

	if (WARN_ON(vmf->pgoff >= dmabuf->pagecount))
		return VM_FAULT_SIGBUS;

	vmf->page = dmabuf->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct mbochs_dmabuf_vm_ops = {
	.fault = mbochs_dmabuf_vm_fault,
};

static int mbochs_mmap_dmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
{
	struct mbochs_dmabuf *dmabuf = buf->priv;
	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);

	dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	vma->vm_ops = &mbochs_dmabuf_vm_ops;
	vma->vm_private_data = dmabuf;
	return 0;
}

static void mbochs_print_dmabuf(struct mbochs_dmabuf *dmabuf,
				const char *prefix)
{
	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
	u32 fourcc = dmabuf->mode.drm_format;

	dev_dbg(dev, "%s/%d: %c%c%c%c, %dx%d, stride %d, off 0x%llx, size 0x%llx, pages %ld\n",
		prefix, dmabuf->id,
		fourcc ? ((fourcc >>  0) & 0xff) : '-',
		fourcc ? ((fourcc >>  8) & 0xff) : '-',
		fourcc ? ((fourcc >> 16) & 0xff) : '-',
		fourcc ? ((fourcc >> 24) & 0xff) : '-',
		dmabuf->mode.width, dmabuf->mode.height, dmabuf->mode.stride,
		dmabuf->mode.offset, dmabuf->mode.size, dmabuf->pagecount);
}

static struct sg_table *mbochs_map_dmabuf(struct dma_buf_attachment *at,
					  enum dma_data_direction direction)
{
	struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);
	struct sg_table *sg;

	dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		goto err1;
	if (sg_alloc_table_from_pages(sg, dmabuf->pages, dmabuf->pagecount,
				      0, dmabuf->mode.size, GFP_KERNEL) < 0)
		goto err2;
	if (!dma_map_sg(at->dev, sg->sgl, sg->nents, direction))
		goto err3;

	return sg;

err3:
	sg_free_table(sg);
err2:
	kfree(sg);
err1:
	return ERR_PTR(-ENOMEM);
}

static void mbochs_unmap_dmabuf(struct dma_buf_attachment *at,
				struct sg_table *sg,
				enum dma_data_direction direction)
{
	struct mbochs_dmabuf *dmabuf = at->dmabuf->priv;
	struct device *dev = mdev_dev(dmabuf->mdev_state->mdev);

	dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

	/* undo the dma_map_sg() done in mbochs_map_dmabuf() */
	dma_unmap_sg(at->dev, sg->sgl, sg->nents, direction);
	sg_free_table(sg);
	kfree(sg);
}

static void mbochs_release_dmabuf(struct dma_buf *buf)
{
	struct mbochs_dmabuf *dmabuf = buf->priv;
	struct mdev_state *mdev_state = dmabuf->mdev_state;
	struct device *dev = mdev_dev(mdev_state->mdev);
	pgoff_t pg;

	dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);

	for (pg = 0; pg < dmabuf->pagecount; pg++)
		put_page(dmabuf->pages[pg]);

	mutex_lock(&mdev_state->ops_lock);
	dmabuf->buf = NULL;
	if (dmabuf->unlinked) {
		kfree(dmabuf->pages);
		kfree(dmabuf);
	}
	mutex_unlock(&mdev_state->ops_lock);
}

static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
				       unsigned long page_num)
{
	struct mbochs_dmabuf *dmabuf = buf->priv;
	struct page *page = dmabuf->pages[page_num];

	return kmap_atomic(page);
}

static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
{
	struct mbochs_dmabuf *dmabuf = buf->priv;
	struct page *page = dmabuf->pages[page_num];

	return kmap(page);
}

static struct dma_buf_ops mbochs_dmabuf_ops = {
	.map_dma_buf	= mbochs_map_dmabuf,
	.unmap_dma_buf	= mbochs_unmap_dmabuf,
	.release	= mbochs_release_dmabuf,
	.map_atomic	= mbochs_kmap_atomic_dmabuf,
	.map		= mbochs_kmap_dmabuf,
	.mmap		= mbochs_mmap_dmabuf,
};

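/*
 * A dmabuf wraps the vram pages backing one framebuffer configuration.
 * Each page is grabbed with an extra reference via __mbochs_get_page(),
 * so an exported buffer stays valid even across a guest mode switch.
 */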
static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
						 struct mbochs_mode *mode)
{
	struct mbochs_dmabuf *dmabuf;
	pgoff_t page_offset, pg;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->mode = *mode;
	dmabuf->id = mdev_state->next_id++;
	dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
	dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
				GFP_KERNEL);
	if (!dmabuf->pages)
		goto err_free_dmabuf;

	page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
	for (pg = 0; pg < dmabuf->pagecount; pg++) {
		dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
						      page_offset + pg);
		if (!dmabuf->pages[pg])
			goto err_free_pages;
	}

	dmabuf->mdev_state = mdev_state;
	list_add(&dmabuf->next, &mdev_state->dmabufs);

	mbochs_print_dmabuf(dmabuf, __func__);
	return dmabuf;

err_free_pages:
	while (pg > 0)
		put_page(dmabuf->pages[--pg]);
	kfree(dmabuf->pages);
err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}

static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state,
			   struct mbochs_mode *mode)
{
	struct mbochs_dmabuf *dmabuf;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
		if (mbochs_modes_equal(&dmabuf->mode, mode))
			return dmabuf;

	return NULL;
}

static struct mbochs_dmabuf *
mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
{
	struct mbochs_dmabuf *dmabuf;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
		if (dmabuf->id == id)
			return dmabuf;

	return NULL;
}

static int mbochs_dmabuf_export(struct mbochs_dmabuf *dmabuf)
{
	struct mdev_state *mdev_state = dmabuf->mdev_state;
	struct device *dev = mdev_dev(mdev_state->mdev);
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *buf;

	WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

	if (!IS_ALIGNED(dmabuf->mode.offset, PAGE_SIZE)) {
		dev_info_ratelimited(dev, "%s: framebuffer not page-aligned\n",
				     __func__);
		return -EINVAL;
	}

	exp_info.ops = &mbochs_dmabuf_ops;
	exp_info.size = dmabuf->mode.size;
	exp_info.priv = dmabuf;

	buf = dma_buf_export(&exp_info);
	if (IS_ERR(buf)) {
		dev_info_ratelimited(dev, "%s: dma_buf_export failed: %ld\n",
				     __func__, PTR_ERR(buf));
		return PTR_ERR(buf);
	}

	dmabuf->buf = buf;
	dev_dbg(dev, "%s: %d\n", __func__, dmabuf->id);
	return 0;
}

static int mbochs_get_region_info(struct mdev_device *mdev,
				  struct vfio_region_info *region_info,
				  u16 *cap_type_id, void **cap_type)
{
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);
	if (!mdev_state)
		return -EINVAL;

	if (region_info->index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size = MBOCHS_CONFIG_SPACE_SIZE;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
		region_info->offset = MBOCHS_MEMORY_BAR_OFFSET;
		region_info->size = mdev_state->memsize;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ  |
				      VFIO_REGION_INFO_FLAG_WRITE |
				      VFIO_REGION_INFO_FLAG_MMAP);
		break;
	case VFIO_PCI_BAR2_REGION_INDEX:
		region_info->offset = MBOCHS_MMIO_BAR_OFFSET;
		region_info->size = MBOCHS_MMIO_BAR_SIZE;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE);
		break;
	default:
		region_info->size = 0;
		region_info->offset = 0;
		region_info->flags = 0;
	}

	return 0;
}

static int mbochs_get_irq_info(struct mdev_device *mdev,
			       struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mbochs_get_device_info(struct mdev_device *mdev,
				  struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

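/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: a probe call (PROBE flag set) merely
 * reports that dmabuf planes are supported; a regular call looks up or
 * creates a dmabuf for the currently programmed mode, so repeated
 * queries of an unchanged mode return the same dmabuf id.
 */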
static int mbochs_query_gfx_plane(struct mdev_device *mdev,
				  struct vfio_device_gfx_plane_info *plane)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct device *dev = mdev_dev(mdev);
	struct mbochs_dmabuf *dmabuf;
	struct mbochs_mode mode;
	int ret;

	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_DMABUF))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_DMABUF)
		return -EINVAL;

	plane->drm_format_mod = 0;
	plane->x_pos = 0;
	plane->y_pos = 0;
	plane->x_hot = 0;
	plane->y_hot = 0;

	mutex_lock(&mdev_state->ops_lock);

	ret = -EINVAL;
	if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY)
		ret = mbochs_check_framebuffer(mdev_state, &mode);
	if (ret < 0) {
		plane->drm_format = 0;
		plane->width = 0;
		plane->height = 0;
		plane->stride = 0;
		plane->size = 0;
		plane->dmabuf_id = 0;
		goto done;
	}

	dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode);
	if (!dmabuf)
		dmabuf = mbochs_dmabuf_alloc(mdev_state, &mode);
	if (!dmabuf) {
		mutex_unlock(&mdev_state->ops_lock);
		return -ENOMEM;
	}

	plane->drm_format = dmabuf->mode.drm_format;
	plane->width = dmabuf->mode.width;
	plane->height = dmabuf->mode.height;
	plane->stride = dmabuf->mode.stride;
	plane->size = dmabuf->mode.size;
	plane->dmabuf_id = dmabuf->id;

done:
	if (plane->drm_plane_type == DRM_PLANE_TYPE_PRIMARY &&
	    mdev_state->active_id != plane->dmabuf_id) {
		dev_dbg(dev, "%s: primary: %d => %d\n", __func__,
			mdev_state->active_id, plane->dmabuf_id);
		mdev_state->active_id = plane->dmabuf_id;
	}
	mutex_unlock(&mdev_state->ops_lock);
	return 0;
}

static int mbochs_get_gfx_dmabuf(struct mdev_device *mdev,
				 u32 id)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct mbochs_dmabuf *dmabuf;

	mutex_lock(&mdev_state->ops_lock);

	dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
	if (!dmabuf) {
		mutex_unlock(&mdev_state->ops_lock);
		return -ENOENT;
	}

	if (!dmabuf->buf)
		mbochs_dmabuf_export(dmabuf);

	mutex_unlock(&mdev_state->ops_lock);

	if (!dmabuf->buf)
		return -EINVAL;

	return dma_buf_fd(dmabuf->buf, 0);
}

static long mbochs_ioctl(struct mdev_device *mdev, unsigned int cmd,
			 unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state;

	mdev_state = mdev_get_drvdata(mdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mbochs_get_device_info(mdev, &info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mbochs_get_region_info(mdev, &info, &cap_type_id,
					     &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mbochs_get_irq_info(mdev, &info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mbochs_query_gfx_plane(mdev, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_GFX_DMABUF:
	{
		u32 dmabuf_id;

		if (get_user(dmabuf_id, (__u32 __user *)arg))
			return -EFAULT;

		return mbochs_get_gfx_dmabuf(mdev, dmabuf_id);
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mbochs_reset(mdev);
	}
	return -ENOTTY;
}

static int mbochs_open(struct mdev_device *mdev)
{
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	return 0;
}

static void mbochs_close(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
	struct mbochs_dmabuf *dmabuf, *tmp;
	pgoff_t pg;

	mutex_lock(&mdev_state->ops_lock);

	list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) {
		list_del(&dmabuf->next);
		if (dmabuf->buf) {
			/* free in mbochs_release_dmabuf() */
			dmabuf->unlinked = true;
		} else {
			/* never exported: drop the page references taken
			 * in mbochs_dmabuf_alloc() before freeing */
			for (pg = 0; pg < dmabuf->pagecount; pg++)
				put_page(dmabuf->pages[pg]);
			kfree(dmabuf->pages);
			kfree(dmabuf);
		}
	}
	mbochs_put_pages(mdev_state);

	mutex_unlock(&mdev_state->ops_lock);
	module_put(THIS_MODULE);
}

static ssize_t
memory_show(struct device *dev, struct device_attribute *attr,
	    char *buf)
{
	struct mdev_device *mdev = mdev_from_dev(dev);
	struct mdev_state *mdev_state = mdev_get_drvdata(mdev);

	return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
}
static DEVICE_ATTR_RO(memory);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_memory.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

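/*
 * Per-type attributes; they show up in sysfs under
 * mdev_supported_types/mbochs-{small,medium,large}/ of the parent
 * device.
 */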
static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
	return sprintf(buf, "%s\n", kobj->name);
}
MDEV_TYPE_ATTR_RO(name);

static ssize_t
description_show(struct kobject *kobj, struct device *dev, char *buf)
{
	const struct mbochs_type *type = mbochs_find_type(kobj);

	return sprintf(buf, "virtual display, %d MB video memory\n",
		       type ? type->mbytes : 0);
}
MDEV_TYPE_ATTR_RO(description);

static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
	const struct mbochs_type *type = mbochs_find_type(kobj);
	int count = (max_mbytes - mbochs_used_mbytes) / type->mbytes;

	return sprintf(buf, "%d\n", count);
}
MDEV_TYPE_ATTR_RO(available_instances);

static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
			       char *buf)
{
	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}
MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
	&mdev_type_attr_name.attr,
	&mdev_type_attr_description.attr,
	&mdev_type_attr_device_api.attr,
	&mdev_type_attr_available_instances.attr,
	NULL,
};

static struct attribute_group mdev_type_group1 = {
	.name  = MBOCHS_TYPE_1,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
	.name  = MBOCHS_TYPE_2,
	.attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group3 = {
	.name  = MBOCHS_TYPE_3,
	.attrs = mdev_types_attrs,
};

static struct attribute_group *mdev_type_groups[] = {
	&mdev_type_group1,
	&mdev_type_group2,
	&mdev_type_group3,
	NULL,
};

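/*
 * The mdev parent ops tie everything together: create/remove manage
 * per-instance state, while read/write/ioctl/mmap implement the vfio
 * device interface on top of mdev_access() and friends.
 */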
static const struct mdev_parent_ops mdev_fops = {
	.owner			= THIS_MODULE,
	.mdev_attr_groups	= mdev_dev_groups,
	.supported_type_groups	= mdev_type_groups,
	.create			= mbochs_create,
	.remove			= mbochs_remove,
	.open			= mbochs_open,
	.release		= mbochs_close,
	.read			= mbochs_read,
	.write			= mbochs_write,
	.ioctl			= mbochs_ioctl,
	.mmap			= mbochs_mmap,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mbochs_device_release(struct device *dev)
{
	/* nothing */
}

static int __init mbochs_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mbochs_devt, 0, MINORMASK, MBOCHS_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mbochs_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mbochs_cdev, &vd_fops);
	cdev_add(&mbochs_cdev, mbochs_devt, MINORMASK);
	pr_info("%s: major %d\n", __func__, MAJOR(mbochs_devt));

	mbochs_class = class_create(THIS_MODULE, MBOCHS_CLASS_NAME);
	if (IS_ERR(mbochs_class)) {
		pr_err("Error: failed to register mbochs_dev class\n");
		ret = PTR_ERR(mbochs_class);
		goto failed1;
	}
	mbochs_dev.class = mbochs_class;
	mbochs_dev.release = mbochs_device_release;
	dev_set_name(&mbochs_dev, "%s", MBOCHS_NAME);

	ret = device_register(&mbochs_dev);
	if (ret)
		goto failed2;

	ret = mdev_register_device(&mbochs_dev, &mdev_fops);
	if (ret)
		goto failed3;

	return 0;

failed3:
	device_unregister(&mbochs_dev);
failed2:
	class_destroy(mbochs_class);
failed1:
	cdev_del(&mbochs_cdev);
	unregister_chrdev_region(mbochs_devt, MINORMASK);
	return ret;
}

static void __exit mbochs_dev_exit(void)
{
	mbochs_dev.bus = NULL;
	mdev_unregister_device(&mbochs_dev);

	device_unregister(&mbochs_dev);
	cdev_del(&mbochs_cdev);
	unregister_chrdev_region(mbochs_devt, MINORMASK);
	class_destroy(mbochs_class);
	mbochs_class = NULL;
}

module_init(mbochs_dev_init)
module_exit(mbochs_dev_exit)