// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 - 2015 UNISYS CORPORATION
 * All rights reserved.
 */

/*
 * This provides s-Par channel communication primitives, which are
 * independent of the mechanism used to access the channel data.
 */

#include <linux/uuid.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/visorbus.h>

#include "visorbus_private.h"
#include "controlvmchannel.h"

#define VISOR_DRV_NAME "visorchannel"

#define VISOR_CONSOLEVIDEO_CHANNEL_GUID \
	GUID_INIT(0x3cd6e705, 0xd6a2, 0x4aa5, \
		  0xad, 0x5c, 0x7b, 0x8, 0x88, 0x9d, 0xff, 0xe2)

static const guid_t visor_video_guid = VISOR_CONSOLEVIDEO_CHANNEL_GUID;

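/*
 * A visorchannel tracks one memory-mapped s-Par channel: its physical
 * location, the memremap'd mapping, and a locally cached copy of the
 * channel header (chan_hdr).
 */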
struct visorchannel {
	u64 physaddr;
	ulong nbytes;
	void *mapped;
	bool requested;
	struct channel_header chan_hdr;
	guid_t guid;
	/*
	 * The channel creator knows if more than one thread will be
	 * inserting or removing signals at the same time.
	 */
	bool needs_lock;
	/* protect head writes in chan_hdr */
	spinlock_t insert_lock;
	/* protect tail writes in chan_hdr */
	spinlock_t remove_lock;
	guid_t type;
	guid_t inst;
};

void visorchannel_destroy(struct visorchannel *channel)
{
	if (!channel)
		return;

	if (channel->mapped) {
		memunmap(channel->mapped);
		if (channel->requested)
			release_mem_region(channel->physaddr, channel->nbytes);
	}
	kfree(channel);
}

u64 visorchannel_get_physaddr(struct visorchannel *channel)
{
	return channel->physaddr;
}

ulong visorchannel_get_nbytes(struct visorchannel *channel)
{
	return channel->nbytes;
}

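/*
 * Format the GUID as a standard UUID string. The caller-supplied buffer
 * must hold at least UUID_STRING_LEN + 1 bytes for the string and its
 * terminating NUL.
 */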
char *visorchannel_guid_id(const guid_t *guid, char *s)
{
	sprintf(s, "%pUL", guid);
	return s;
}

char *visorchannel_id(struct visorchannel *channel, char *s)
{
	return visorchannel_guid_id(&channel->guid, s);
}

char *visorchannel_zoneid(struct visorchannel *channel, char *s)
{
	return visorchannel_guid_id(&channel->chan_hdr.zone_guid, s);
}

u64 visorchannel_get_clientpartition(struct visorchannel *channel)
{
	return channel->chan_hdr.partition_handle;
}

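/* Only the locally cached copy of the channel header is updated here. */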
int visorchannel_set_clientpartition(struct visorchannel *channel,
				     u64 partition_handle)
{
	channel->chan_hdr.partition_handle = partition_handle;
	return 0;
}

/**
 * visorchannel_get_guid() - queries the GUID of the designated channel
 * @channel: the channel to query
 *
 * Return: the GUID of the provided channel
 */
const guid_t *visorchannel_get_guid(struct visorchannel *channel)
{
	return &channel->guid;
}
EXPORT_SYMBOL_GPL(visorchannel_get_guid);

int visorchannel_read(struct visorchannel *channel, ulong offset, void *dest,
		      ulong nbytes)
{
	if (offset + nbytes > channel->nbytes)
		return -EIO;

	memcpy(dest, channel->mapped + offset, nbytes);
	return 0;
}

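/*
 * Writes that overlap the channel header are also copied into the locally
 * cached chan_hdr, so the cache stays consistent with channel memory.
 */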
int visorchannel_write(struct visorchannel *channel, ulong offset, void *dest,
		       ulong nbytes)
{
	size_t chdr_size = sizeof(struct channel_header);
	size_t copy_size;

	if (offset + nbytes > channel->nbytes)
		return -EIO;

	if (offset < chdr_size) {
		copy_size = min(chdr_size - offset, nbytes);
		memcpy(((char *)(&channel->chan_hdr)) + offset,
		       dest, copy_size);
	}
	memcpy(channel->mapped + offset, dest, nbytes);
	return 0;
}

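/*
 * Returns the locally cached copy of the channel header, not a pointer into
 * channel memory.
 */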
void *visorchannel_get_header(struct visorchannel *channel)
{
	return &channel->chan_hdr;
}

/*
 * Return offset of a specific SIGNAL_QUEUE_HEADER from the beginning of a
 * channel header
 */
static int sig_queue_offset(struct channel_header *chan_hdr, int q)
{
	return ((chan_hdr)->ch_space_offset +
		((q) * sizeof(struct signal_queue_header)));
}

/*
 * Return offset of a specific queue entry (data) from the beginning of a
 * channel header
 */
static int sig_data_offset(struct channel_header *chan_hdr, int q,
			   struct signal_queue_header *sig_hdr, int slot)
{
	return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
		(slot * sig_hdr->signal_size));
}

/*
 * Write the contents of a specific field within a SIGNAL_QUEUE_HEADER back
 * into host memory
 */
#define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
	visorchannel_write(channel, \
			   sig_queue_offset(&channel->chan_hdr, queue) + \
			   offsetof(struct signal_queue_header, FIELD), \
			   &((sig_hdr)->FIELD), \
			   sizeof((sig_hdr)->FIELD))

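/*
 * Read the designated queue's signal_queue_header from channel memory into
 * a local copy. Fails with -EINVAL if the channel header reports a queue
 * area that would overlap the channel header itself.
 */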
static int sig_read_header(struct visorchannel *channel, u32 queue,
			   struct signal_queue_header *sig_hdr)
{
	if (channel->chan_hdr.ch_space_offset < sizeof(struct channel_header))
		return -EINVAL;

	/* Read the appropriate SIGNAL_QUEUE_HEADER into local memory. */
	return visorchannel_read(channel,
				 sig_queue_offset(&channel->chan_hdr, queue),
				 sig_hdr, sizeof(struct signal_queue_header));
}

static int sig_read_data(struct visorchannel *channel, u32 queue,
			 struct signal_queue_header *sig_hdr, u32 slot,
			 void *data)
{
	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
						 sig_hdr, slot);

	return visorchannel_read(channel, signal_data_offset,
				 data, sig_hdr->signal_size);
}

static int sig_write_data(struct visorchannel *channel, u32 queue,
			  struct signal_queue_header *sig_hdr, u32 slot,
			  void *data)
{
	int signal_data_offset = sig_data_offset(&channel->chan_hdr, queue,
						 sig_hdr, slot);

	return visorchannel_write(channel, signal_data_offset,
				  data, sig_hdr->signal_size);
}

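/*
 * Remove one signal from the tail of the designated queue: advance the tail
 * slot (modulo max_slots), copy the slot's payload into @msg, bump
 * num_received, and then write the updated tail and num_received back to
 * channel memory.
 */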
static int signalremove_inner(struct visorchannel *channel, u32 queue,
			      void *msg)
{
	struct signal_queue_header sig_hdr;
	int error;

	error = sig_read_header(channel, queue, &sig_hdr);
	if (error)
		return error;
	/* No signals to remove; have caller try again. */
	if (sig_hdr.head == sig_hdr.tail)
		return -EAGAIN;
	sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots;
	error = sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg);
	if (error)
		return error;
	sig_hdr.num_received++;
	/*
	 * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
	 * host memory. Required for channel sync.
	 */
	mb();
	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail);
	if (error)
		return error;
	error = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received);
	if (error)
		return error;
	return 0;
}

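/*
 * Usage sketch (illustrative only; handle_msg() and queue index 0 are
 * hypothetical): a client polling a channel typically drains a queue by
 * calling visorchannel_signalremove() until it returns -EAGAIN:
 *
 *	while (!visorchannel_signalremove(channel, 0, &msg))
 *		handle_msg(&msg);
 */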
/**
 * visorchannel_signalremove() - removes a message from the designated
 *                               channel/queue
 * @channel: the channel the message will be removed from
 * @queue: the queue the message will be removed from
 * @msg: the message to remove
 *
 * Return: integer error code indicating the status of the removal
 */
int visorchannel_signalremove(struct visorchannel *channel, u32 queue,
			      void *msg)
{
	int rc;
	unsigned long flags;

	if (channel->needs_lock) {
		spin_lock_irqsave(&channel->remove_lock, flags);
		rc = signalremove_inner(channel, queue, msg);
		spin_unlock_irqrestore(&channel->remove_lock, flags);
	} else {
		rc = signalremove_inner(channel, queue, msg);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalremove);

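/*
 * Returns true when the designated queue holds no signals; a failed header
 * read is also reported as empty so callers do not spin on a bad channel.
 */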
static bool queue_empty(struct visorchannel *channel, u32 queue)
{
	struct signal_queue_header sig_hdr;

	if (sig_read_header(channel, queue, &sig_hdr))
		return true;
	return (sig_hdr.head == sig_hdr.tail);
}

/**
 * visorchannel_signalempty() - checks if the designated channel/queue contains
 *                              any messages
 * @channel: the channel to query
 * @queue: the queue in the channel to query
 *
 * Return: true if the designated channel/queue contains no messages (is
 *         empty), false otherwise
 */
bool visorchannel_signalempty(struct visorchannel *channel, u32 queue)
{
	bool rc;
	unsigned long flags;

	if (!channel->needs_lock)
		return queue_empty(channel, queue);
	spin_lock_irqsave(&channel->remove_lock, flags);
	rc = queue_empty(channel, queue);
	spin_unlock_irqrestore(&channel->remove_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalempty);

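/*
 * Insert one signal at the head of the designated queue: advance the head
 * slot (modulo max_slots); if the queue is full, bump num_overflows and fail
 * with -EIO; otherwise copy @msg into the slot, bump num_sent, and write the
 * updated head and num_sent back to channel memory.
 */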
static int signalinsert_inner(struct visorchannel *channel, u32 queue,
			      void *msg)
{
	struct signal_queue_header sig_hdr;
	int err;

	err = sig_read_header(channel, queue, &sig_hdr);
	if (err)
		return err;
	sig_hdr.head = (sig_hdr.head + 1) % sig_hdr.max_slots;
	if (sig_hdr.head == sig_hdr.tail) {
		sig_hdr.num_overflows++;
		err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_overflows);
		if (err)
			return err;
		return -EIO;
	}
	err = sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg);
	if (err)
		return err;
	sig_hdr.num_sent++;
	/*
	 * For each data field in SIGNAL_QUEUE_HEADER that was modified, update
	 * host memory. Required for channel sync.
	 */
	mb();
	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, head);
	if (err)
		return err;
	err = SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent);
	if (err)
		return err;
	return 0;
}

/*
 * visorchannel_create() - creates the struct visorchannel abstraction for a
 *                         data area in memory, but does NOT modify this data
 *                         area
 * @physaddr: physical address of start of channel
 * @gfp: gfp_t to use when allocating memory for the data struct
 * @guid: GUID that identifies channel type
 * @needs_lock: must specify true if you have multiple threads of execution
 *              that will be calling visorchannel methods of this
 *              visorchannel at the same time
 *
 * Return: pointer to visorchannel that was created if successful,
 *         otherwise NULL
 */
struct visorchannel *visorchannel_create(u64 physaddr, gfp_t gfp,
					 const guid_t *guid, bool needs_lock)
{
	struct visorchannel *channel;
	int err;
	size_t size = sizeof(struct channel_header);

	if (physaddr == 0)
		return NULL;

	channel = kzalloc(sizeof(*channel), gfp);
	if (!channel)
		return NULL;
	channel->needs_lock = needs_lock;
	spin_lock_init(&channel->insert_lock);
	spin_lock_init(&channel->remove_lock);
	/*
	 * The video driver contains the EFI framebuffer, so it will get a
	 * resource conflict when requesting its full mem region. Since we are
	 * only using the EFI framebuffer for video, we can ignore this.
	 * Remember that we haven't requested it, so we don't try to release
	 * it later on.
	 */
	channel->requested = request_mem_region(physaddr, size, VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(physaddr, size);
		goto err_destroy_channel;
	}
	channel->physaddr = physaddr;
	channel->nbytes = size;
	err = visorchannel_read(channel, 0, &channel->chan_hdr, size);
	if (err)
		goto err_destroy_channel;
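	/*
	 * The header we just read tells us the channel's full size; drop the
	 * header-only mapping and re-request/remap the entire channel.
	 */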
	size = (ulong)channel->chan_hdr.size;
	memunmap(channel->mapped);
	if (channel->requested)
		release_mem_region(channel->physaddr, channel->nbytes);
	channel->mapped = NULL;
	channel->requested = request_mem_region(channel->physaddr, size,
						VISOR_DRV_NAME);
	if (!channel->requested && !guid_equal(guid, &visor_video_guid))
		/* we only care about errors if this is not the video channel */
		goto err_destroy_channel;
	channel->mapped = memremap(channel->physaddr, size, MEMREMAP_WB);
	if (!channel->mapped) {
		release_mem_region(channel->physaddr, size);
		goto err_destroy_channel;
	}
	channel->nbytes = size;
	guid_copy(&channel->guid, guid);
	return channel;

err_destroy_channel:
	visorchannel_destroy(channel);
	return NULL;
}

/**
 * visorchannel_signalinsert() - inserts a message into the designated
 *                               channel/queue
 * @channel: the channel the message will be added to
 * @queue: the queue the message will be added to
 * @msg: the message to insert
 *
 * Return: integer error code indicating the status of the insertion
 */
int visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
			      void *msg)
{
	int rc;
	unsigned long flags;

	if (channel->needs_lock) {
		spin_lock_irqsave(&channel->insert_lock, flags);
		rc = signalinsert_inner(channel, queue, msg);
		spin_unlock_irqrestore(&channel->insert_lock, flags);
	} else {
		rc = signalinsert_inner(channel, queue, msg);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(visorchannel_signalinsert);