blob: fd95deeb6133201824a8a1d5e335d570153bc2fb [file] [log] [blame]
/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
13
14#define DRIVER_NAME "msm_sharedmem"
15#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
16
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/rwsem.h>
20#include <linux/slab.h>
21#include <linux/list.h>
22#include <linux/debugfs.h>
23#include <soc/qcom/msm_qmi_interface.h>
24#include "sharedmem_qmi.h"
25#include "remote_filesystem_access_v01.h"
26
#define RFSA_SERVICE_INSTANCE_NUM 1
#define SHARED_ADDR_ENTRY_NAME_MAX_LEN 10

/*
 * One registered shared-memory region, filled in from the
 * sharemem_qmi_entry handed to sharedmem_qmi_add_entry().
 */
struct shared_addr_entry {
	u32 id;			/* client id used as the lookup key */
	u64 address;		/* buffer address returned to QMI clients */
	u32 size;		/* buffer size in bytes */
	u64 request_count;	/* number of successful address lookups */
	bool is_addr_dynamic;	/* reported in debugfs as Dynamic/Static */
	char name[SHARED_ADDR_ENTRY_NAME_MAX_LEN + 1]; /* NUL-terminated, may be truncated */
};
38
/* List node wrapping one shared_addr_entry; chained onto 'list' below. */
struct shared_addr_list {
	struct list_head node;
	struct shared_addr_entry entry;
};
43
/* Head node of the registered shared-memory entry list. */
static struct shared_addr_list list;

static struct qmi_handle *sharedmem_qmi_svc_handle;
static void sharedmem_qmi_svc_recv_msg(struct work_struct *work);
static DECLARE_DELAYED_WORK(work_recv_msg, sharedmem_qmi_svc_recv_msg);
static struct workqueue_struct *sharedmem_qmi_svc_workqueue;
static struct dentry *dir_ent;	/* debugfs "rmt_storage" directory */

/* Server start counters, incremented via the debugfs rfsa/rmts files. */
static u32 rfsa_count;
static u32 rmts_count;

static DECLARE_RWSEM(sharedmem_list_lock); /* protects 'list' */

static struct work_struct sharedmem_qmi_init_work;
58
/* QMI message descriptors for the RFSA get-buffer-address transaction. */
static struct msg_desc rfsa_get_buffer_addr_req_desc = {
	.max_msg_len = RFSA_GET_BUFF_ADDR_REQ_MSG_MAX_LEN_V01,
	.msg_id = QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01,
	.ei_array = rfsa_get_buff_addr_req_msg_v01_ei,
};

static struct msg_desc rfsa_get_buffer_addr_resp_desc = {
	.max_msg_len = RFSA_GET_BUFF_ADDR_RESP_MSG_MAX_LEN_V01,
	.msg_id = QMI_RFSA_GET_BUFF_ADDR_RESP_MSG_V01,
	.ei_array = rfsa_get_buff_addr_resp_msg_v01_ei,
};
70
71void sharedmem_qmi_add_entry(struct sharemem_qmi_entry *qmi_entry)
72{
73 struct shared_addr_list *list_entry;
74
75 list_entry = kzalloc(sizeof(*list_entry), GFP_KERNEL);
76
77 /* If we cannot add the entry log the failure and bail */
78 if (list_entry == NULL) {
79 pr_err("Alloc of new list entry failed\n");
80 return;
81 }
82
83 /* Copy as much of the client name that can fit in the entry. */
84 strlcpy(list_entry->entry.name, qmi_entry->client_name,
85 sizeof(list_entry->entry.name));
86
87 /* Setup the rest of the entry. */
88 list_entry->entry.id = qmi_entry->client_id;
89 list_entry->entry.address = qmi_entry->address;
90 list_entry->entry.size = qmi_entry->size;
91 list_entry->entry.is_addr_dynamic = qmi_entry->is_addr_dynamic;
92 list_entry->entry.request_count = 0;
93
94 down_write(&sharedmem_list_lock);
95 list_add_tail(&(list_entry->node), &(list.node));
96 up_write(&sharedmem_list_lock);
97 pr_debug("Added new entry to list\n");
98
99}
100
101static int get_buffer_for_client(u32 id, u32 size, u64 *address)
102{
103 int result = -ENOENT;
104 int client_found = 0;
105 struct list_head *curr_node;
106 struct shared_addr_list *list_entry;
107
108 if (size == 0)
109 return -ENOMEM;
110
111 down_read(&sharedmem_list_lock);
112
113 list_for_each(curr_node, &list.node) {
114 list_entry = list_entry(curr_node, struct shared_addr_list,
115 node);
116 if (list_entry->entry.id == id) {
117 if (list_entry->entry.size >= size) {
118 *address = list_entry->entry.address;
119 list_entry->entry.request_count++;
120 result = 0;
121 } else {
122 pr_err("Shared mem req too large for id=%u\n",
123 id);
124 result = -ENOMEM;
125 }
126 client_found = 1;
127 break;
128 }
129 }
130
131 up_read(&sharedmem_list_lock);
132
133 if (client_found != 1) {
134 pr_err("Unknown client id %u\n", id);
135 result = -ENOENT;
136 }
137 return result;
138}
139
140static int sharedmem_qmi_get_buffer(void *conn_h, void *req_handle, void *req)
141{
142 struct rfsa_get_buff_addr_req_msg_v01 *get_buffer_req;
143 struct rfsa_get_buff_addr_resp_msg_v01 get_buffer_resp;
144 int result;
145 u64 address = 0;
146
147 get_buffer_req = (struct rfsa_get_buff_addr_req_msg_v01 *)req;
148 pr_debug("req->client_id = 0x%X and req->size = %d\n",
149 get_buffer_req->client_id, get_buffer_req->size);
150
151 result = get_buffer_for_client(get_buffer_req->client_id,
152 get_buffer_req->size, &address);
153 if (result != 0)
154 return result;
155
156 if (address == 0) {
157 pr_err("Entry found for client id= 0x%X but address is zero\n",
158 get_buffer_req->client_id);
159 return -ENOMEM;
160 }
161
162 memset(&get_buffer_resp, 0, sizeof(get_buffer_resp));
163 get_buffer_resp.address_valid = 1;
164 get_buffer_resp.address = address;
165 get_buffer_resp.resp.result = QMI_RESULT_SUCCESS_V01;
166
167 result = qmi_send_resp_from_cb(sharedmem_qmi_svc_handle, conn_h,
168 req_handle,
169 &rfsa_get_buffer_addr_resp_desc,
170 &get_buffer_resp,
171 sizeof(get_buffer_resp));
172 return result;
173}
174
175
176static int sharedmem_qmi_connect_cb(struct qmi_handle *handle, void *conn_h)
177{
178 if (sharedmem_qmi_svc_handle != handle || !conn_h)
179 return -EINVAL;
180 return 0;
181}
182
183static int sharedmem_qmi_disconnect_cb(struct qmi_handle *handle, void *conn_h)
184{
185 if (sharedmem_qmi_svc_handle != handle || !conn_h)
186 return -EINVAL;
187 return 0;
188}
189
190static int sharedmem_qmi_req_desc_cb(unsigned int msg_id,
191 struct msg_desc **req_desc)
192{
193 int rc;
194
195 switch (msg_id) {
196 case QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01:
197 *req_desc = &rfsa_get_buffer_addr_req_desc;
198 rc = sizeof(struct rfsa_get_buff_addr_req_msg_v01);
199 break;
200
201 default:
202 rc = -ENOTSUPP;
203 break;
204 }
205 return rc;
206}
207
208static int sharedmem_qmi_req_cb(struct qmi_handle *handle, void *conn_h,
209 void *req_handle, unsigned int msg_id,
210 void *req)
211{
212 int rc = -ENOTSUPP;
213
214 if (sharedmem_qmi_svc_handle != handle || !conn_h)
215 return -EINVAL;
216
217 if (msg_id == QMI_RFSA_GET_BUFF_ADDR_REQ_MSG_V01)
218 rc = sharedmem_qmi_get_buffer(conn_h, req_handle, req);
219
220 return rc;
221}
222
#define DEBUG_BUF_SIZE (2048)
static char *debug_buffer;	/* snapshot served by the debugfs "info" file */
static u32 debug_data_size;	/* bytes of valid data in debug_buffer */
static struct mutex dbg_buf_lock; /* serializes open/close of debug_buffer */
Nikhilesh Reddybc69c702015-06-01 16:08:32 -0700227
/* Serve reads of the "info" file from the snapshot built in debug_open(). */
static ssize_t debug_read(struct file *file, char __user *buf,
			size_t count, loff_t *file_pos)
{
	return simple_read_from_buffer(buf, count, file_pos, debug_buffer,
					debug_data_size);
}
234
/*
 * fill_debug_info() - format every registered entry plus the RFSA/RMTS
 * start counters into @buffer.
 *
 * Returns the number of bytes written; scnprintf() caps output at
 * @buffer_size so the result never exceeds the buffer.
 */
static u32 fill_debug_info(char *buffer, u32 buffer_size)
{
	u32 size = 0;
	struct list_head *curr_node;
	struct shared_addr_list *list_entry;

	memset(buffer, 0, buffer_size);
	size += scnprintf(buffer + size, buffer_size - size, "\n");

	/* Dump each list entry while holding the read lock. */
	down_read(&sharedmem_list_lock);
	list_for_each(curr_node, &list.node) {
		list_entry = list_entry(curr_node, struct shared_addr_list,
					node);
		size += scnprintf(buffer + size, buffer_size - size,
				"Client_name: %s\n", list_entry->entry.name);
		size += scnprintf(buffer + size, buffer_size - size,
				"Client_id: 0x%08X\n", list_entry->entry.id);
		size += scnprintf(buffer + size, buffer_size - size,
				"Buffer Size: 0x%08X (%d)\n",
				list_entry->entry.size,
				list_entry->entry.size);
		size += scnprintf(buffer + size, buffer_size - size,
				"Address: 0x%016llX\n",
				list_entry->entry.address);
		size += scnprintf(buffer + size, buffer_size - size,
				"Address Allocation: %s\n",
				(list_entry->entry.is_addr_dynamic ?
				"Dynamic" : "Static"));
		size += scnprintf(buffer + size, buffer_size - size,
				"Request count: %llu\n",
				list_entry->entry.request_count);
		size += scnprintf(buffer + size, buffer_size - size, "\n\n");
	}
	up_read(&sharedmem_list_lock);

	size += scnprintf(buffer + size, buffer_size - size,
			"RFSA server start count = %u\n", rfsa_count);
	size += scnprintf(buffer + size, buffer_size - size,
			"RMTS server start count = %u\n", rmts_count);

	size += scnprintf(buffer + size, buffer_size - size, "\n");
	return size;
}
278
/*
 * debug_open() - build the debug snapshot for one reader.
 *
 * Only one concurrent opener is allowed: while debug_buffer is live a
 * second open() fails with -EBUSY.  dbg_buf_lock serializes this path
 * against debug_close() freeing the buffer.
 */
static int debug_open(struct inode *inode, struct file *file)
{
	u32 buffer_size;

	mutex_lock(&dbg_buf_lock);
	if (debug_buffer != NULL) {
		mutex_unlock(&dbg_buf_lock);
		return -EBUSY;
	}
	buffer_size = DEBUG_BUF_SIZE;
	debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
	if (debug_buffer == NULL) {
		mutex_unlock(&dbg_buf_lock);
		return -ENOMEM;
	}
	debug_data_size = fill_debug_info(debug_buffer, buffer_size);
	mutex_unlock(&dbg_buf_lock);
	return 0;
}
298
/* Release the snapshot so the next open() can allocate a fresh one. */
static int debug_close(struct inode *inode, struct file *file)
{
	mutex_lock(&dbg_buf_lock);
	kfree(debug_buffer);
	debug_buffer = NULL;	/* NULL marks "no reader" for debug_open() */
	debug_data_size = 0;
	mutex_unlock(&dbg_buf_lock);
	return 0;
}
308
/* fops for the read-only debugfs "info" file. */
static const struct file_operations debug_ops = {
	.read = debug_read,
	.open = debug_open,
	.release = debug_close,
};
314
315static int rfsa_increment(void *data, u64 val)
316{
317 if (rfsa_count != ~0)
318 rfsa_count++;
319 return 0;
320}
321
322static int rmts_increment(void *data, u64 val)
323{
324 if (rmts_count != ~0)
325 rmts_count++;
326 return 0;
327}
328
329DEFINE_SIMPLE_ATTRIBUTE(rfsa_fops, NULL, rfsa_increment, "%llu\n");
330DEFINE_SIMPLE_ATTRIBUTE(rmts_fops, NULL, rmts_increment, "%llu\n");
331
332static void debugfs_init(void)
333{
334 struct dentry *f_ent;
335
Sahitya Tummala61e19252017-02-03 13:24:19 +0530336 mutex_init(&dbg_buf_lock);
Nikhilesh Reddybc69c702015-06-01 16:08:32 -0700337 dir_ent = debugfs_create_dir("rmt_storage", NULL);
338 if (IS_ERR(dir_ent)) {
339 pr_err("Failed to create debug_fs directory\n");
340 return;
341 }
342
343 f_ent = debugfs_create_file("info", 0400, dir_ent, NULL, &debug_ops);
344 if (IS_ERR(f_ent)) {
345 pr_err("Failed to create debug_fs info file\n");
346 return;
347 }
348
349 f_ent = debugfs_create_file("rfsa", 0200, dir_ent, NULL, &rfsa_fops);
350 if (IS_ERR(f_ent)) {
351 pr_err("Failed to create debug_fs rfsa file\n");
352 return;
353 }
354
355 f_ent = debugfs_create_file("rmts", 0200, dir_ent, NULL, &rmts_fops);
356 if (IS_ERR(f_ent)) {
357 pr_err("Failed to create debug_fs rmts file\n");
358 return;
359 }
360}
361
/* Tear down the debugfs tree and lock created by debugfs_init(). */
static void debugfs_exit(void)
{
	debugfs_remove_recursive(dir_ent);
	mutex_destroy(&dbg_buf_lock);
}
367
368static void sharedmem_qmi_svc_recv_msg(struct work_struct *work)
369{
370 int rc;
371
372 do {
373 pr_debug("Notified about a Receive Event\n");
374 } while ((rc = qmi_recv_msg(sharedmem_qmi_svc_handle)) == 0);
375
376 if (rc != -ENOMSG)
377 pr_err("Error receiving message\n");
378}
379
380static void sharedmem_qmi_notify(struct qmi_handle *handle,
381 enum qmi_event_type event, void *priv)
382{
383 switch (event) {
384 case QMI_RECV_MSG:
385 queue_delayed_work(sharedmem_qmi_svc_workqueue,
386 &work_recv_msg, 0);
387 break;
388 default:
389 break;
390 }
391}
392
/* Registration parameters and callbacks for the RFSA QMI service. */
static struct qmi_svc_ops_options sharedmem_qmi_ops_options = {
	.version = 1,
	.service_id = RFSA_SERVICE_ID_V01,
	.service_vers = RFSA_SERVICE_VERS_V01,
	.service_ins = RFSA_SERVICE_INSTANCE_NUM,
	.connect_cb = sharedmem_qmi_connect_cb,
	.disconnect_cb = sharedmem_qmi_disconnect_cb,
	.req_desc_cb = sharedmem_qmi_req_desc_cb,
	.req_cb = sharedmem_qmi_req_cb,
};
403
404
405static void sharedmem_register_qmi(void)
406{
407 int rc;
408
409 sharedmem_qmi_svc_workqueue =
410 create_singlethread_workqueue("sharedmem_qmi_work");
411 if (!sharedmem_qmi_svc_workqueue)
412 return;
413
414 sharedmem_qmi_svc_handle = qmi_handle_create(sharedmem_qmi_notify,
415 NULL);
416 if (!sharedmem_qmi_svc_handle) {
417 pr_err("Creating sharedmem_qmi qmi handle failed\n");
418 destroy_workqueue(sharedmem_qmi_svc_workqueue);
419 return;
420 }
421 rc = qmi_svc_register(sharedmem_qmi_svc_handle,
422 &sharedmem_qmi_ops_options);
423 if (rc < 0) {
424 pr_err("Registering sharedmem_qmi failed %d\n", rc);
425 qmi_handle_destroy(sharedmem_qmi_svc_handle);
426 destroy_workqueue(sharedmem_qmi_svc_workqueue);
427 return;
428 }
429 pr_info("qmi init successful\n");
430}
431
/* Deferred init: register the QMI service, then create debugfs files. */
static void sharedmem_qmi_init_worker(struct work_struct *work)
{
	sharedmem_register_qmi();
	debugfs_init();
}
437
/*
 * sharedmem_qmi_init() - initialize the entry list and defer QMI
 * registration plus debugfs setup to a scheduled worker.
 *
 * Always returns 0; registration failures surface later in the worker.
 */
int sharedmem_qmi_init(void)
{
	INIT_LIST_HEAD(&list.node);
	INIT_WORK(&sharedmem_qmi_init_work, sharedmem_qmi_init_worker);
	schedule_work(&sharedmem_qmi_init_work);
	return 0;
}
445
446void sharedmem_qmi_exit(void)
447{
448 qmi_svc_unregister(sharedmem_qmi_svc_handle);
449 flush_workqueue(sharedmem_qmi_svc_workqueue);
450 qmi_handle_destroy(sharedmem_qmi_svc_handle);
451 destroy_workqueue(sharedmem_qmi_svc_workqueue);
452 debugfs_exit();
453}