1/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/slab.h>
14#include <linux/delay.h>
15#include <linux/diagchar.h>
16#include <linux/kmemleak.h>
17#include <linux/workqueue.h>
18#include <linux/uaccess.h>
19#include "diagchar.h"
20#include "diagfwd_cntl.h"
21#include "diag_masks.h"
22#include "diagfwd_peripheral.h"
23#include "diag_ipc_logging.h"
24
25#define ALL_EQUIP_ID 100
26#define ALL_SSID -1
27
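/* Set feature bit x in the caller's local feature_bytes[]: byte x/8, bit x%8 */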
28#define DIAG_SET_FEATURE_MASK(x) (feature_bytes[(x)/8] |= (1 << (x & 0x7)))
29
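/* True when there is no md session info, or the session owns peripheral x */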
30#define diag_check_update(x) \
 31 (!info || (info->peripheral_mask & MD_PERIPHERAL_MASK(x)))
32
33struct diag_mask_info msg_mask;
34struct diag_mask_info msg_bt_mask;
35struct diag_mask_info log_mask;
36struct diag_mask_info event_mask;
37
38static const struct diag_ssid_range_t msg_mask_tbl[] = {
39 { .ssid_first = MSG_SSID_0, .ssid_last = MSG_SSID_0_LAST },
40 { .ssid_first = MSG_SSID_1, .ssid_last = MSG_SSID_1_LAST },
41 { .ssid_first = MSG_SSID_2, .ssid_last = MSG_SSID_2_LAST },
42 { .ssid_first = MSG_SSID_3, .ssid_last = MSG_SSID_3_LAST },
43 { .ssid_first = MSG_SSID_4, .ssid_last = MSG_SSID_4_LAST },
44 { .ssid_first = MSG_SSID_5, .ssid_last = MSG_SSID_5_LAST },
45 { .ssid_first = MSG_SSID_6, .ssid_last = MSG_SSID_6_LAST },
46 { .ssid_first = MSG_SSID_7, .ssid_last = MSG_SSID_7_LAST },
47 { .ssid_first = MSG_SSID_8, .ssid_last = MSG_SSID_8_LAST },
48 { .ssid_first = MSG_SSID_9, .ssid_last = MSG_SSID_9_LAST },
49 { .ssid_first = MSG_SSID_10, .ssid_last = MSG_SSID_10_LAST },
50 { .ssid_first = MSG_SSID_11, .ssid_last = MSG_SSID_11_LAST },
51 { .ssid_first = MSG_SSID_12, .ssid_last = MSG_SSID_12_LAST },
52 { .ssid_first = MSG_SSID_13, .ssid_last = MSG_SSID_13_LAST },
53 { .ssid_first = MSG_SSID_14, .ssid_last = MSG_SSID_14_LAST },
54 { .ssid_first = MSG_SSID_15, .ssid_last = MSG_SSID_15_LAST },
55 { .ssid_first = MSG_SSID_16, .ssid_last = MSG_SSID_16_LAST },
56 { .ssid_first = MSG_SSID_17, .ssid_last = MSG_SSID_17_LAST },
57 { .ssid_first = MSG_SSID_18, .ssid_last = MSG_SSID_18_LAST },
58 { .ssid_first = MSG_SSID_19, .ssid_last = MSG_SSID_19_LAST },
59 { .ssid_first = MSG_SSID_20, .ssid_last = MSG_SSID_20_LAST },
60 { .ssid_first = MSG_SSID_21, .ssid_last = MSG_SSID_21_LAST },
61 { .ssid_first = MSG_SSID_22, .ssid_last = MSG_SSID_22_LAST },
62 { .ssid_first = MSG_SSID_23, .ssid_last = MSG_SSID_23_LAST },
63 { .ssid_first = MSG_SSID_24, .ssid_last = MSG_SSID_24_LAST }
64};
65
66static int diag_apps_responds(void)
67{
68 /*
 69 * The apps processor should respond to mask commands only if the
 70 * Modem channel is up, the feature mask has been received from the
 71 * Modem, and the Modem supports Mask Centralization.
72 */
73 if (!chk_apps_only())
74 return 0;
75
76 if (driver->diagfwd_cntl[PERIPHERAL_MODEM] &&
77 driver->diagfwd_cntl[PERIPHERAL_MODEM]->ch_open &&
78 driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
79 if (driver->feature[PERIPHERAL_MODEM].mask_centralization)
80 return 1;
81 return 0;
82 }
83 return 1;
84}
85
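/*
 * Send the log mask for equip_id (or for every equip ID when equip_id is
 * ALL_EQUIP_ID) to the given peripheral over its control channel.
 */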
86static void diag_send_log_mask_update(uint8_t peripheral, int equip_id)
87{
88 int i;
89 int err = 0;
90 int send_once = 0;
91 int header_len = sizeof(struct diag_ctrl_log_mask);
92 uint8_t *buf = NULL;
93 uint8_t *temp = NULL;
94 uint32_t mask_size = 0;
95 struct diag_ctrl_log_mask ctrl_pkt;
96 struct diag_mask_info *mask_info = NULL;
97 struct diag_log_mask_t *mask = NULL;
98
99 if (peripheral >= NUM_PERIPHERALS)
100 return;
101
102 if (!driver->diagfwd_cntl[peripheral] ||
103 !driver->diagfwd_cntl[peripheral]->ch_open) {
104 pr_debug("diag: In %s, control channel is not open, p: %d\n",
105 __func__, peripheral);
106 return;
107 }
108
109 if (driver->md_session_mask != 0 &&
110 driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral))
111 mask_info = driver->md_session_map[peripheral]->log_mask;
112 else
113 mask_info = &log_mask;
114
115 if (!mask_info)
116 return;
117
118 mask = (struct diag_log_mask_t *)mask_info->ptr;
119 buf = mask_info->update_buf;
120
121 switch (mask_info->status) {
122 case DIAG_CTRL_MASK_ALL_DISABLED:
123 ctrl_pkt.equip_id = 0;
124 ctrl_pkt.num_items = 0;
125 ctrl_pkt.log_mask_size = 0;
126 send_once = 1;
127 break;
128 case DIAG_CTRL_MASK_ALL_ENABLED:
129 ctrl_pkt.equip_id = 0;
130 ctrl_pkt.num_items = 0;
131 ctrl_pkt.log_mask_size = 0;
132 send_once = 1;
133 break;
134 case DIAG_CTRL_MASK_VALID:
135 send_once = 0;
136 break;
137 default:
138 pr_debug("diag: In %s, invalid log_mask status\n", __func__);
139 return;
140 }
141
142 mutex_lock(&mask_info->lock);
143 for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
144 if (equip_id != i && equip_id != ALL_EQUIP_ID)
145 continue;
146
147 mutex_lock(&mask->lock);
148 ctrl_pkt.cmd_type = DIAG_CTRL_MSG_LOG_MASK;
149 ctrl_pkt.stream_id = 1;
150 ctrl_pkt.status = mask_info->status;
151 if (mask_info->status == DIAG_CTRL_MASK_VALID) {
152 mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
153 ctrl_pkt.equip_id = i;
154 ctrl_pkt.num_items = mask->num_items_tools;
155 ctrl_pkt.log_mask_size = mask_size;
156 }
157 ctrl_pkt.data_len = LOG_MASK_CTRL_HEADER_LEN + mask_size;
158
159 if (header_len + mask_size > mask_info->update_buf_len) {
160 temp = krealloc(buf, header_len + mask_size,
161 GFP_KERNEL);
162 if (!temp) {
163 pr_err_ratelimited("diag: Unable to realloc log update buffer, new size: %d, equip_id: %d\n",
164 header_len + mask_size, equip_id);
165 mutex_unlock(&mask->lock);
166 break;
167 }
 168 mask_info->update_buf = temp;
 169 mask_info->update_buf_len = header_len + mask_size;
 /* krealloc may have moved the buffer; track the new address */
 buf = temp;
170 }
171
172 memcpy(buf, &ctrl_pkt, header_len);
173 if (mask_size > 0)
174 memcpy(buf + header_len, mask->ptr, mask_size);
175 mutex_unlock(&mask->lock);
176
177 DIAG_LOG(DIAG_DEBUG_MASKS,
178 "sending ctrl pkt to %d, e %d num_items %d size %d\n",
179 peripheral, i, ctrl_pkt.num_items,
180 ctrl_pkt.log_mask_size);
181
182 err = diagfwd_write(peripheral, TYPE_CNTL,
183 buf, header_len + mask_size);
184 if (err && err != -ENODEV)
185 pr_err_ratelimited("diag: Unable to send log masks to peripheral %d, equip_id: %d, err: %d\n",
186 peripheral, i, err);
187 if (send_once || equip_id != ALL_EQUIP_ID)
188 break;
189
190 }
191 mutex_unlock(&mask_info->lock);
192}
193
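/*
 * Send the current event mask (or an enable/disable-all control packet,
 * depending on the mask status) to the given peripheral.
 */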
194static void diag_send_event_mask_update(uint8_t peripheral)
195{
196 uint8_t *buf = NULL;
197 uint8_t *temp = NULL;
198 struct diag_ctrl_event_mask header;
199 struct diag_mask_info *mask_info = NULL;
200 int num_bytes = EVENT_COUNT_TO_BYTES(driver->last_event_id);
201 int write_len = 0;
202 int err = 0;
203 int temp_len = 0;
204
205 if (num_bytes <= 0 || num_bytes > driver->event_mask_size) {
206 pr_debug("diag: In %s, invalid event mask length %d\n",
207 __func__, num_bytes);
208 return;
209 }
210
211 if (peripheral >= NUM_PERIPHERALS)
212 return;
213
214 if (!driver->diagfwd_cntl[peripheral] ||
215 !driver->diagfwd_cntl[peripheral]->ch_open) {
216 pr_debug("diag: In %s, control channel is not open, p: %d\n",
217 __func__, peripheral);
218 return;
219 }
220
221 if (driver->md_session_mask != 0 &&
222 (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
223 mask_info = driver->md_session_map[peripheral]->event_mask;
224 else
225 mask_info = &event_mask;
226
227 if (!mask_info)
228 return;
229
230 buf = mask_info->update_buf;
231 mutex_lock(&mask_info->lock);
232 header.cmd_type = DIAG_CTRL_MSG_EVENT_MASK;
233 header.stream_id = 1;
234 header.status = mask_info->status;
235
236 switch (mask_info->status) {
237 case DIAG_CTRL_MASK_ALL_DISABLED:
238 header.event_config = 0;
239 header.event_mask_size = 0;
240 break;
241 case DIAG_CTRL_MASK_ALL_ENABLED:
242 header.event_config = 1;
243 header.event_mask_size = 0;
244 break;
245 case DIAG_CTRL_MASK_VALID:
246 header.event_config = 1;
247 header.event_mask_size = num_bytes;
248 if (num_bytes + sizeof(header) > mask_info->update_buf_len) {
249 temp_len = num_bytes + sizeof(header);
250 temp = krealloc(buf, temp_len, GFP_KERNEL);
251 if (!temp) {
252 pr_err("diag: Unable to realloc event mask update buffer\n");
253 goto err;
254 } else {
 255 mask_info->update_buf = temp;
 256 mask_info->update_buf_len = temp_len;
 /* krealloc may have moved the buffer; track the new address */
 buf = temp;
 257 }
258 }
259 memcpy(buf + sizeof(header), mask_info->ptr, num_bytes);
260 write_len += num_bytes;
261 break;
262 default:
263 pr_debug("diag: In %s, invalid status %d\n", __func__,
264 mask_info->status);
265 goto err;
266 }
267 header.data_len = EVENT_MASK_CTRL_HEADER_LEN + header.event_mask_size;
268 memcpy(buf, &header, sizeof(header));
269 write_len += sizeof(header);
270
271 err = diagfwd_write(peripheral, TYPE_CNTL, buf, write_len);
272 if (err && err != -ENODEV)
273 pr_err_ratelimited("diag: Unable to send event masks to peripheral %d\n",
274 peripheral);
275err:
276 mutex_unlock(&mask_info->lock);
277}
278
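/*
 * Send msg (F3) mask control packets for the SSID range covering
 * [first, last], or for every range when first is ALL_SSID.
 */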
279static void diag_send_msg_mask_update(uint8_t peripheral, int first, int last)
280{
281 int i;
282 int err = 0;
283 int header_len = sizeof(struct diag_ctrl_msg_mask);
284 int temp_len = 0;
285 uint8_t *buf = NULL;
286 uint8_t *temp = NULL;
287 uint32_t mask_size = 0;
288 struct diag_mask_info *mask_info = NULL;
289 struct diag_msg_mask_t *mask = NULL;
290 struct diag_ctrl_msg_mask header;
291
292 if (peripheral >= NUM_PERIPHERALS)
293 return;
294
295 if (!driver->diagfwd_cntl[peripheral] ||
296 !driver->diagfwd_cntl[peripheral]->ch_open) {
297 pr_debug("diag: In %s, control channel is not open, p: %d\n",
298 __func__, peripheral);
299 return;
300 }
301
302 if (driver->md_session_mask != 0 &&
303 (driver->md_session_mask & MD_PERIPHERAL_MASK(peripheral)))
304 mask_info = driver->md_session_map[peripheral]->msg_mask;
305 else
306 mask_info = &msg_mask;
307
308 if (!mask_info)
309 return;
310
311 mask = (struct diag_msg_mask_t *)mask_info->ptr;
312 buf = mask_info->update_buf;
313 mutex_lock(&mask_info->lock);
314 switch (mask_info->status) {
315 case DIAG_CTRL_MASK_ALL_DISABLED:
316 mask_size = 0;
317 break;
318 case DIAG_CTRL_MASK_ALL_ENABLED:
319 mask_size = 1;
320 break;
321 case DIAG_CTRL_MASK_VALID:
322 break;
323 default:
324 pr_debug("diag: In %s, invalid status: %d\n", __func__,
325 mask_info->status);
326 goto err;
327 }
328
329 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
330 if (((first < mask->ssid_first) ||
331 (last > mask->ssid_last_tools)) && first != ALL_SSID) {
332 continue;
333 }
334
335 mutex_lock(&mask->lock);
336 if (mask_info->status == DIAG_CTRL_MASK_VALID) {
337 mask_size =
338 mask->ssid_last_tools - mask->ssid_first + 1;
339 temp_len = mask_size * sizeof(uint32_t);
340 if (temp_len + header_len <= mask_info->update_buf_len)
341 goto proceed;
342 temp = krealloc(mask_info->update_buf, temp_len,
343 GFP_KERNEL);
344 if (!temp) {
345 pr_err("diag: In %s, unable to realloc msg_mask update buffer\n",
346 __func__);
347 mask_size = (mask_info->update_buf_len -
348 header_len) / sizeof(uint32_t);
349 } else {
 350 mask_info->update_buf = temp;
 351 mask_info->update_buf_len = temp_len;
 /* krealloc may have moved the buffer; track the new address */
 buf = temp;
352 pr_debug("diag: In %s, successfully reallocated msg_mask update buffer to len: %d\n",
353 __func__, mask_info->update_buf_len);
354 }
355 } else if (mask_info->status == DIAG_CTRL_MASK_ALL_ENABLED) {
356 mask_size = 1;
357 }
358proceed:
359 header.cmd_type = DIAG_CTRL_MSG_F3_MASK;
360 header.status = mask_info->status;
361 header.stream_id = 1;
362 header.msg_mode = 0;
363 header.ssid_first = mask->ssid_first;
364 header.ssid_last = mask->ssid_last_tools;
365 header.msg_mask_size = mask_size;
366 mask_size *= sizeof(uint32_t);
367 header.data_len = MSG_MASK_CTRL_HEADER_LEN + mask_size;
368 memcpy(buf, &header, header_len);
369 if (mask_size > 0)
370 memcpy(buf + header_len, mask->ptr, mask_size);
371 mutex_unlock(&mask->lock);
372
373 err = diagfwd_write(peripheral, TYPE_CNTL, buf,
374 header_len + mask_size);
375 if (err && err != -ENODEV)
376 pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
377 peripheral);
378
379 if (first != ALL_SSID)
380 break;
381 }
382err:
383 mutex_unlock(&mask_info->lock);
384}
385
386static void diag_send_time_sync_update(uint8_t peripheral)
387{
388 struct diag_ctrl_msg_time_sync time_sync_msg;
389 int msg_size = sizeof(struct diag_ctrl_msg_time_sync);
390 int err = 0;
391
392 if (peripheral >= NUM_PERIPHERALS) {
393 pr_err("diag: In %s, Invalid peripheral, %d\n",
394 __func__, peripheral);
395 return;
396 }
397
398 if (!driver->diagfwd_cntl[peripheral] ||
399 !driver->diagfwd_cntl[peripheral]->ch_open) {
400 pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
401 __func__, peripheral, driver->diagfwd_cntl[peripheral]);
402 return;
403 }
404
405 mutex_lock(&driver->diag_cntl_mutex);
406 time_sync_msg.ctrl_pkt_id = DIAG_CTRL_MSG_TIME_SYNC_PKT;
407 time_sync_msg.ctrl_pkt_data_len = 5;
408 time_sync_msg.version = 1;
409 time_sync_msg.time_api = driver->uses_time_api;
410
411 err = diagfwd_write(peripheral, TYPE_CNTL, &time_sync_msg, msg_size);
412 if (err)
413 pr_err("diag: In %s, unable to write to peripheral: %d, type: %d, len: %d, err: %d\n",
414 __func__, peripheral, TYPE_CNTL,
415 msg_size, err);
416 mutex_unlock(&driver->diag_cntl_mutex);
417}
418
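/*
 * Advertise the apps feature mask (DIAG_CTRL_MSG_FEATURE) so the
 * peripheral knows which capabilities the apps processor supports.
 */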
419static void diag_send_feature_mask_update(uint8_t peripheral)
420{
421 void *buf = driver->buf_feature_mask_update;
422 int header_size = sizeof(struct diag_ctrl_feature_mask);
423 uint8_t feature_bytes[FEATURE_MASK_LEN] = {0, 0};
424 struct diag_ctrl_feature_mask feature_mask;
425 int total_len = 0;
426 int err = 0;
427
428 if (peripheral >= NUM_PERIPHERALS) {
429 pr_err("diag: In %s, Invalid peripheral, %d\n",
430 __func__, peripheral);
431 return;
432 }
433
434 if (!driver->diagfwd_cntl[peripheral] ||
435 !driver->diagfwd_cntl[peripheral]->ch_open) {
436 pr_err("diag: In %s, control channel is not open, p: %d, %pK\n",
437 __func__, peripheral, driver->diagfwd_cntl[peripheral]);
438 return;
439 }
440
441 mutex_lock(&driver->diag_cntl_mutex);
442 /* send feature mask update */
443 feature_mask.ctrl_pkt_id = DIAG_CTRL_MSG_FEATURE;
444 feature_mask.ctrl_pkt_data_len = sizeof(uint32_t) + FEATURE_MASK_LEN;
445 feature_mask.feature_mask_len = FEATURE_MASK_LEN;
446 memcpy(buf, &feature_mask, header_size);
447 DIAG_SET_FEATURE_MASK(F_DIAG_FEATURE_MASK_SUPPORT);
448 DIAG_SET_FEATURE_MASK(F_DIAG_LOG_ON_DEMAND_APPS);
449 DIAG_SET_FEATURE_MASK(F_DIAG_STM);
450 DIAG_SET_FEATURE_MASK(F_DIAG_DCI_EXTENDED_HEADER_SUPPORT);
451 if (driver->supports_separate_cmdrsp)
452 DIAG_SET_FEATURE_MASK(F_DIAG_REQ_RSP_SUPPORT);
453 if (driver->supports_apps_hdlc_encoding)
454 DIAG_SET_FEATURE_MASK(F_DIAG_APPS_HDLC_ENCODE);
455 DIAG_SET_FEATURE_MASK(F_DIAG_MASK_CENTRALIZATION);
456 if (driver->supports_sockets)
457 DIAG_SET_FEATURE_MASK(F_DIAG_SOCKETS_ENABLED);
458
459 memcpy(buf + header_size, &feature_bytes, FEATURE_MASK_LEN);
460 total_len = header_size + FEATURE_MASK_LEN;
461
462 err = diagfwd_write(peripheral, TYPE_CNTL, buf, total_len);
463 if (err) {
464 pr_err_ratelimited("diag: In %s, unable to write feature mask to peripheral: %d, type: %d, len: %d, err: %d\n",
465 __func__, peripheral, TYPE_CNTL,
466 total_len, err);
467 mutex_unlock(&driver->diag_cntl_mutex);
468 return;
469 }
470 driver->feature[peripheral].sent_feature_mask = 1;
471 mutex_unlock(&driver->diag_cntl_mutex);
472}
473
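/*
 * Handler for DIAG_CMD_OP_GET_SSID_RANGE: respond with the count and the
 * list of SSID ranges in the msg mask table.
 */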
474static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
475 unsigned char *dest_buf, int dest_len,
476 struct diag_md_session_t *info)
477{
478 int i;
479 int write_len = 0;
480 struct diag_msg_mask_t *mask_ptr = NULL;
481 struct diag_msg_ssid_query_t rsp;
482 struct diag_ssid_range_t ssid_range;
483 struct diag_mask_info *mask_info = NULL;
484
485 mask_info = (!info) ? &msg_mask : info->msg_mask;
486 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
487 !mask_info) {
488 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
489 __func__, src_buf, src_len, dest_buf, dest_len,
490 mask_info);
491 return -EINVAL;
492 }
493
494 if (!diag_apps_responds())
495 return 0;
496
497 rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
498 rsp.sub_cmd = DIAG_CMD_OP_GET_SSID_RANGE;
499 rsp.status = MSG_STATUS_SUCCESS;
500 rsp.padding = 0;
501 rsp.count = driver->msg_mask_tbl_count;
502 memcpy(dest_buf, &rsp, sizeof(rsp));
503 write_len += sizeof(rsp);
504
505 mask_ptr = (struct diag_msg_mask_t *)mask_info->ptr;
506 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask_ptr++) {
507 if (write_len + sizeof(ssid_range) > dest_len) {
508 pr_err("diag: In %s, Truncating response due to size limitations of rsp buffer\n",
509 __func__);
510 break;
511 }
512 ssid_range.ssid_first = mask_ptr->ssid_first;
513 ssid_range.ssid_last = mask_ptr->ssid_last_tools;
514 memcpy(dest_buf + write_len, &ssid_range, sizeof(ssid_range));
515 write_len += sizeof(ssid_range);
516 }
517
518 return write_len;
519}
520
521static int diag_cmd_get_build_mask(unsigned char *src_buf, int src_len,
522 unsigned char *dest_buf, int dest_len,
523 struct diag_md_session_t *info)
524{
525 int i = 0;
526 int write_len = 0;
527 int num_entries = 0;
528 int copy_len = 0;
529 struct diag_msg_mask_t *build_mask = NULL;
530 struct diag_build_mask_req_t *req = NULL;
531 struct diag_msg_build_mask_t rsp;
532
533 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
534 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
535 __func__, src_buf, src_len, dest_buf, dest_len);
536 return -EINVAL;
537 }
538
539 if (!diag_apps_responds())
540 return 0;
541
542 req = (struct diag_build_mask_req_t *)src_buf;
543 rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
544 rsp.sub_cmd = DIAG_CMD_OP_GET_BUILD_MASK;
545 rsp.ssid_first = req->ssid_first;
546 rsp.ssid_last = req->ssid_last;
547 rsp.status = MSG_STATUS_FAIL;
548 rsp.padding = 0;
549
550 build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
551 for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
552 if (build_mask->ssid_first != req->ssid_first)
553 continue;
554 num_entries = req->ssid_last - req->ssid_first + 1;
555 if (num_entries > build_mask->range) {
556 pr_warn("diag: In %s, truncating ssid range for ssid_first: %d ssid_last %d\n",
557 __func__, req->ssid_first, req->ssid_last);
558 num_entries = build_mask->range;
 559 req->ssid_last = req->ssid_first + build_mask->range - 1;
560 }
561 copy_len = num_entries * sizeof(uint32_t);
562 if (copy_len + sizeof(rsp) > dest_len)
563 copy_len = dest_len - sizeof(rsp);
564 memcpy(dest_buf + sizeof(rsp), build_mask->ptr, copy_len);
565 write_len += copy_len;
566 rsp.ssid_last = build_mask->ssid_last;
567 rsp.status = MSG_STATUS_SUCCESS;
568 break;
569 }
570 memcpy(dest_buf, &rsp, sizeof(rsp));
571 write_len += sizeof(rsp);
572
573 return write_len;
574}
575
576static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
577 unsigned char *dest_buf, int dest_len,
578 struct diag_md_session_t *info)
579{
580 int i;
581 int write_len = 0;
582 uint32_t mask_size = 0;
583 struct diag_msg_mask_t *mask = NULL;
584 struct diag_build_mask_req_t *req = NULL;
585 struct diag_msg_build_mask_t rsp;
586 struct diag_mask_info *mask_info = NULL;
587
588 mask_info = (!info) ? &msg_mask : info->msg_mask;
589 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
590 !mask_info) {
591 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
592 __func__, src_buf, src_len, dest_buf, dest_len,
593 mask_info);
594 return -EINVAL;
595 }
596
597 if (!diag_apps_responds())
598 return 0;
599
600 req = (struct diag_build_mask_req_t *)src_buf;
601 rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
602 rsp.sub_cmd = DIAG_CMD_OP_GET_MSG_MASK;
603 rsp.ssid_first = req->ssid_first;
604 rsp.ssid_last = req->ssid_last;
605 rsp.status = MSG_STATUS_FAIL;
606 rsp.padding = 0;
607
608 mask = (struct diag_msg_mask_t *)mask_info->ptr;
609 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
610 if ((req->ssid_first < mask->ssid_first) ||
611 (req->ssid_first > mask->ssid_last_tools)) {
612 continue;
613 }
614 mask_size = mask->range * sizeof(uint32_t);
615 /* Copy msg mask only till the end of the rsp buffer */
616 if (mask_size + sizeof(rsp) > dest_len)
617 mask_size = dest_len - sizeof(rsp);
618 memcpy(dest_buf + sizeof(rsp), mask->ptr, mask_size);
619 write_len += mask_size;
620 rsp.status = MSG_STATUS_SUCCESS;
621 break;
622 }
623 memcpy(dest_buf, &rsp, sizeof(rsp));
624 write_len += sizeof(rsp);
625
626 return write_len;
627}
628
629static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
630 unsigned char *dest_buf, int dest_len,
631 struct diag_md_session_t *info)
632{
633 int i;
634 int write_len = 0;
635 int header_len = sizeof(struct diag_msg_build_mask_t);
636 int found = 0;
637 uint32_t mask_size = 0;
638 uint32_t offset = 0;
639 struct diag_msg_mask_t *mask = NULL;
640 struct diag_msg_build_mask_t *req = NULL;
641 struct diag_msg_build_mask_t rsp;
642 struct diag_mask_info *mask_info = NULL;
643 struct diag_msg_mask_t *mask_next = NULL;
644 uint32_t *temp = NULL;
645
646 mask_info = (!info) ? &msg_mask : info->msg_mask;
647 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
648 !mask_info) {
649 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
650 __func__, src_buf, src_len, dest_buf, dest_len,
651 mask_info);
652 return -EINVAL;
653 }
654
655 req = (struct diag_msg_build_mask_t *)src_buf;
656
657 mutex_lock(&mask_info->lock);
658 mask = (struct diag_msg_mask_t *)mask_info->ptr;
659 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
660 if (i < (driver->msg_mask_tbl_count - 1)) {
661 mask_next = mask;
662 mask_next++;
663 } else
664 mask_next = NULL;
665
666 if ((req->ssid_first < mask->ssid_first) ||
667 (req->ssid_first > mask->ssid_first + MAX_SSID_PER_RANGE) ||
668 (mask_next && (req->ssid_first >= mask_next->ssid_first))) {
669 continue;
670 }
671 mask_next = NULL;
672 found = 1;
673 mutex_lock(&mask->lock);
674 mask_size = req->ssid_last - req->ssid_first + 1;
675 if (mask_size > MAX_SSID_PER_RANGE) {
676 pr_warn("diag: In %s, truncating ssid range, %d-%d to max allowed: %d\n",
677 __func__, mask->ssid_first, mask->ssid_last,
678 MAX_SSID_PER_RANGE);
679 mask_size = MAX_SSID_PER_RANGE;
680 mask->range_tools = MAX_SSID_PER_RANGE;
 681 mask->ssid_last_tools =
 682 mask->ssid_first + mask->range_tools - 1;
683 }
684 if (req->ssid_last > mask->ssid_last_tools) {
685 pr_debug("diag: Msg SSID range mismatch\n");
686 if (mask_size != MAX_SSID_PER_RANGE)
687 mask->ssid_last_tools = req->ssid_last;
688 mask->range_tools =
689 mask->ssid_last_tools - mask->ssid_first + 1;
690 temp = krealloc(mask->ptr,
691 mask->range_tools * sizeof(uint32_t),
692 GFP_KERNEL);
693 if (!temp) {
694 pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
695 __func__, mask_size);
 696 mutex_unlock(&mask->lock);
 mutex_unlock(&mask_info->lock);
 697 return -ENOMEM;
698 }
699 mask->ptr = temp;
700 }
701
702 offset = req->ssid_first - mask->ssid_first;
703 if (offset + mask_size > mask->range_tools) {
704 pr_err("diag: In %s, Not in msg mask range, mask_size: %d, offset: %d\n",
705 __func__, mask_size, offset);
706 mutex_unlock(&mask->lock);
707 break;
708 }
709 mask_size = mask_size * sizeof(uint32_t);
710 memcpy(mask->ptr + offset, src_buf + header_len, mask_size);
711 mutex_unlock(&mask->lock);
712 mask_info->status = DIAG_CTRL_MASK_VALID;
713 break;
714 }
715 mutex_unlock(&mask_info->lock);
716
717 if (diag_check_update(APPS_DATA))
718 diag_update_userspace_clients(MSG_MASKS_TYPE);
719
720 /*
721 * Apps processor must send the response to this command. Frame the
722 * response.
723 */
724 rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
725 rsp.sub_cmd = DIAG_CMD_OP_SET_MSG_MASK;
726 rsp.ssid_first = req->ssid_first;
727 rsp.ssid_last = req->ssid_last;
728 rsp.status = found;
729 rsp.padding = 0;
730 memcpy(dest_buf, &rsp, header_len);
731 write_len += header_len;
732 if (!found)
733 goto end;
734 if (mask_size + write_len > dest_len)
735 mask_size = dest_len - write_len;
736 memcpy(dest_buf + write_len, src_buf + header_len, mask_size);
737 write_len += mask_size;
738 for (i = 0; i < NUM_PERIPHERALS; i++) {
739 if (!diag_check_update(i))
740 continue;
741 diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
742 }
743end:
744 return write_len;
745}
746
747static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
748 unsigned char *dest_buf, int dest_len,
749 struct diag_md_session_t *info)
750{
751 int i;
752 int write_len = 0;
753 int header_len = sizeof(struct diag_msg_config_rsp_t);
754 struct diag_msg_config_rsp_t rsp;
755 struct diag_msg_config_rsp_t *req = NULL;
756 struct diag_msg_mask_t *mask = NULL;
757 struct diag_mask_info *mask_info = NULL;
758
759 mask_info = (!info) ? &msg_mask : info->msg_mask;
760 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
761 !mask_info) {
762 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
763 __func__, src_buf, src_len, dest_buf, dest_len,
764 mask_info);
765 return -EINVAL;
766 }
767
768 req = (struct diag_msg_config_rsp_t *)src_buf;
769
770 mask = (struct diag_msg_mask_t *)mask_info->ptr;
771 mutex_lock(&mask_info->lock);
772 mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
773 DIAG_CTRL_MASK_ALL_DISABLED;
774 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
775 mutex_lock(&mask->lock);
776 memset(mask->ptr, req->rt_mask,
777 mask->range * sizeof(uint32_t));
778 mutex_unlock(&mask->lock);
779 }
780 mutex_unlock(&mask_info->lock);
781
782 if (diag_check_update(APPS_DATA))
783 diag_update_userspace_clients(MSG_MASKS_TYPE);
784
785 /*
786 * Apps processor must send the response to this command. Frame the
787 * response.
788 */
789 rsp.cmd_code = DIAG_CMD_MSG_CONFIG;
790 rsp.sub_cmd = DIAG_CMD_OP_SET_ALL_MSG_MASK;
791 rsp.status = MSG_STATUS_SUCCESS;
792 rsp.padding = 0;
793 rsp.rt_mask = req->rt_mask;
794 memcpy(dest_buf, &rsp, header_len);
795 write_len += header_len;
796
797 for (i = 0; i < NUM_PERIPHERALS; i++) {
798 if (!diag_check_update(i))
799 continue;
800 diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
801 }
802
803 return write_len;
804}
805
806static int diag_cmd_get_event_mask(unsigned char *src_buf, int src_len,
807 unsigned char *dest_buf, int dest_len,
808 struct diag_md_session_t *info)
809{
810 int write_len = 0;
811 uint32_t mask_size;
812 struct diag_event_mask_config_t rsp;
813
814 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0) {
815 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
816 __func__, src_buf, src_len, dest_buf, dest_len);
817 return -EINVAL;
818 }
819
820 if (!diag_apps_responds())
821 return 0;
822
823 mask_size = EVENT_COUNT_TO_BYTES(driver->last_event_id);
824 if (mask_size + sizeof(rsp) > dest_len) {
825 pr_err("diag: In %s, invalid mask size: %d\n", __func__,
826 mask_size);
827 return -ENOMEM;
828 }
829
830 rsp.cmd_code = DIAG_CMD_GET_EVENT_MASK;
831 rsp.status = EVENT_STATUS_SUCCESS;
832 rsp.padding = 0;
833 rsp.num_bits = driver->last_event_id + 1;
834 memcpy(dest_buf, &rsp, sizeof(rsp));
835 write_len += sizeof(rsp);
836 memcpy(dest_buf + write_len, event_mask.ptr, mask_size);
837 write_len += mask_size;
838
839 return write_len;
840}
841
842static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
843 unsigned char *dest_buf, int dest_len,
844 struct diag_md_session_t *info)
845{
846 int i;
847 int write_len = 0;
848 int mask_len = 0;
849 int header_len = sizeof(struct diag_event_mask_config_t);
850 struct diag_event_mask_config_t rsp;
851 struct diag_event_mask_config_t *req;
852 struct diag_mask_info *mask_info = NULL;
853
854 mask_info = (!info) ? &event_mask : info->event_mask;
855 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
856 !mask_info) {
857 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
858 __func__, src_buf, src_len, dest_buf, dest_len,
859 mask_info);
860 return -EINVAL;
861 }
862
863 req = (struct diag_event_mask_config_t *)src_buf;
864 mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
865 if (mask_len <= 0 || mask_len > event_mask.mask_len) {
866 pr_err("diag: In %s, invalid event mask len: %d\n", __func__,
867 mask_len);
868 return -EIO;
869 }
870
871 mutex_lock(&mask_info->lock);
872 memcpy(mask_info->ptr, src_buf + header_len, mask_len);
873 mask_info->status = DIAG_CTRL_MASK_VALID;
874 mutex_unlock(&mask_info->lock);
875 if (diag_check_update(APPS_DATA))
876 diag_update_userspace_clients(EVENT_MASKS_TYPE);
877
878 /*
879 * Apps processor must send the response to this command. Frame the
880 * response.
881 */
882 rsp.cmd_code = DIAG_CMD_SET_EVENT_MASK;
883 rsp.status = EVENT_STATUS_SUCCESS;
884 rsp.padding = 0;
885 rsp.num_bits = driver->last_event_id + 1;
886 memcpy(dest_buf, &rsp, header_len);
887 write_len += header_len;
888 memcpy(dest_buf + write_len, mask_info->ptr, mask_len);
889 write_len += mask_len;
890
891 for (i = 0; i < NUM_PERIPHERALS; i++) {
892 if (!diag_check_update(i))
893 continue;
894 diag_send_event_mask_update(i);
895 }
896
897 return write_len;
898}
899
900static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
901 unsigned char *dest_buf, int dest_len,
902 struct diag_md_session_t *info)
903{
904 int i;
905 int write_len = 0;
906 uint8_t toggle = 0;
907 struct diag_event_report_t header;
908 struct diag_mask_info *mask_info = NULL;
909
910 mask_info = (!info) ? &event_mask : info->event_mask;
911 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
912 !mask_info) {
913 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
914 __func__, src_buf, src_len, dest_buf, dest_len,
915 mask_info);
916 return -EINVAL;
917 }
918
919 toggle = *(src_buf + 1);
920 mutex_lock(&mask_info->lock);
921 if (toggle) {
922 mask_info->status = DIAG_CTRL_MASK_ALL_ENABLED;
923 memset(mask_info->ptr, 0xFF, mask_info->mask_len);
924 } else {
925 mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
926 memset(mask_info->ptr, 0, mask_info->mask_len);
927 }
928 mutex_unlock(&mask_info->lock);
929 if (diag_check_update(APPS_DATA))
930 diag_update_userspace_clients(EVENT_MASKS_TYPE);
931
932 /*
933 * Apps processor must send the response to this command. Frame the
934 * response.
935 */
936 header.cmd_code = DIAG_CMD_EVENT_TOGGLE;
937 header.padding = 0;
938 for (i = 0; i < NUM_PERIPHERALS; i++) {
939 if (!diag_check_update(i))
940 continue;
941 diag_send_event_mask_update(i);
942 }
943 memcpy(dest_buf, &header, sizeof(header));
944 write_len += sizeof(header);
945
946 return write_len;
947}
948
949static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
950 unsigned char *dest_buf, int dest_len,
951 struct diag_md_session_t *info)
952{
953 int i;
954 int status = LOG_STATUS_INVALID;
955 int write_len = 0;
956 int read_len = 0;
957 int req_header_len = sizeof(struct diag_log_config_req_t);
958 int rsp_header_len = sizeof(struct diag_log_config_rsp_t);
959 uint32_t mask_size = 0;
960 struct diag_log_mask_t *log_item = NULL;
961 struct diag_log_config_req_t *req;
962 struct diag_log_config_rsp_t rsp;
963 struct diag_mask_info *mask_info = NULL;
964
965 mask_info = (!info) ? &log_mask : info->log_mask;
966 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
967 !mask_info) {
968 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
969 __func__, src_buf, src_len, dest_buf, dest_len,
970 mask_info);
971 return -EINVAL;
972 }
973
974 if (!diag_apps_responds())
975 return 0;
976
977 req = (struct diag_log_config_req_t *)src_buf;
978 read_len += req_header_len;
979
980 rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
981 rsp.padding[0] = 0;
982 rsp.padding[1] = 0;
983 rsp.padding[2] = 0;
984 rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_MASK;
985 /*
986 * Don't copy the response header now. Copy at the end after
987 * calculating the status field value
988 */
989 write_len += rsp_header_len;
990
991 log_item = (struct diag_log_mask_t *)mask_info->ptr;
992 for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
993 if (log_item->equip_id != req->equip_id)
994 continue;
995 mutex_lock(&log_item->lock);
996 mask_size = LOG_ITEMS_TO_SIZE(log_item->num_items_tools);
997 /*
998 * Make sure we have space to fill the response in the buffer.
 999 * Destination buffer should at least be able to hold equip_id
1000 * (uint32_t), num_items(uint32_t), mask (mask_size) and the
1001 * response header.
1002 */
1003 if ((mask_size + (2 * sizeof(uint32_t)) + rsp_header_len) >
1004 dest_len) {
1005 pr_err("diag: In %s, invalid length: %d, max rsp_len: %d\n",
1006 __func__, mask_size, dest_len);
1007 status = LOG_STATUS_FAIL;
1008 mutex_unlock(&log_item->lock);
1009 break;
1010 }
1011 *(uint32_t *)(dest_buf + write_len) = log_item->equip_id;
1012 write_len += sizeof(uint32_t);
1013 *(uint32_t *)(dest_buf + write_len) = log_item->num_items_tools;
1014 write_len += sizeof(uint32_t);
1015 if (mask_size > 0) {
1016 memcpy(dest_buf + write_len, log_item->ptr, mask_size);
1017 write_len += mask_size;
1018 }
1019 DIAG_LOG(DIAG_DEBUG_MASKS,
1020 "sending log e %d num_items %d size %d\n",
1021 log_item->equip_id, log_item->num_items_tools,
1022 log_item->range_tools);
1023 mutex_unlock(&log_item->lock);
1024 status = LOG_STATUS_SUCCESS;
1025 break;
1026 }
1027
1028 rsp.status = status;
1029 memcpy(dest_buf, &rsp, rsp_header_len);
1030
1031 return write_len;
1032}
1033
1034static int diag_cmd_get_log_range(unsigned char *src_buf, int src_len,
1035 unsigned char *dest_buf, int dest_len,
1036 struct diag_md_session_t *info)
1037{
1038 int i;
1039 int write_len = 0;
1040 struct diag_log_config_rsp_t rsp;
1041 struct diag_mask_info *mask_info = NULL;
1042 struct diag_log_mask_t *mask = (struct diag_log_mask_t *)log_mask.ptr;
1043
1044 if (!diag_apps_responds())
1045 return 0;
1046
1047 mask_info = (!info) ? &log_mask : info->log_mask;
1048 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
1049 !mask_info) {
1050 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
1051 __func__, src_buf, src_len, dest_buf, dest_len,
1052 mask_info);
1053 return -EINVAL;
1054 }
1055
1056 rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
1057 rsp.padding[0] = 0;
1058 rsp.padding[1] = 0;
1059 rsp.padding[2] = 0;
1060 rsp.sub_cmd = DIAG_CMD_OP_GET_LOG_RANGE;
1061 rsp.status = LOG_STATUS_SUCCESS;
1062 memcpy(dest_buf, &rsp, sizeof(rsp));
1063 write_len += sizeof(rsp);
1064
1065 for (i = 0; i < MAX_EQUIP_ID && write_len < dest_len; i++, mask++) {
1066 *(uint32_t *)(dest_buf + write_len) = mask->num_items_tools;
1067 write_len += sizeof(uint32_t);
1068 }
1069
1070 return write_len;
1071}
1072
1073static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
1074 unsigned char *dest_buf, int dest_len,
1075 struct diag_md_session_t *info)
1076{
1077 int i;
1078 int write_len = 0;
1079 int status = LOG_STATUS_SUCCESS;
1080 int read_len = 0;
1081 int payload_len = 0;
1082 int req_header_len = sizeof(struct diag_log_config_req_t);
1083 int rsp_header_len = sizeof(struct diag_log_config_set_rsp_t);
1084 uint32_t mask_size = 0;
1085 struct diag_log_config_req_t *req;
1086 struct diag_log_config_set_rsp_t rsp;
1087 struct diag_log_mask_t *mask = NULL;
1088 unsigned char *temp_buf = NULL;
1089 struct diag_mask_info *mask_info = NULL;
1090
1091 mask_info = (!info) ? &log_mask : info->log_mask;
1092 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
1093 !mask_info) {
1094 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
1095 __func__, src_buf, src_len, dest_buf, dest_len,
1096 mask_info);
1097 return -EINVAL;
1098 }
1099
1100 req = (struct diag_log_config_req_t *)src_buf;
1101 read_len += req_header_len;
1102 mask = (struct diag_log_mask_t *)mask_info->ptr;
1103
1104 if (req->equip_id >= MAX_EQUIP_ID) {
1105 pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
1106 __func__, req->equip_id);
1107 status = LOG_STATUS_INVALID;
1108 }
1109
1110 if (req->num_items == 0) {
1111 pr_err("diag: In %s, Invalid number of items in log mask request, equip_id: %d\n",
1112 __func__, req->equip_id);
1113 status = LOG_STATUS_INVALID;
1114 }
1115
1116 mutex_lock(&mask_info->lock);
1117 for (i = 0; i < MAX_EQUIP_ID && !status; i++, mask++) {
1118 if (mask->equip_id != req->equip_id)
1119 continue;
1120 mutex_lock(&mask->lock);
1121
1122 DIAG_LOG(DIAG_DEBUG_MASKS, "e: %d current: %d %d new: %d %d",
1123 mask->equip_id, mask->num_items_tools,
1124 mask->range_tools, req->num_items,
1125 LOG_ITEMS_TO_SIZE(req->num_items));
1126 /*
1127 * If the size of the log mask cannot fit into our
1128 * buffer, trim till we have space left in the buffer.
1129 * num_items should then reflect the items that we have
1130 * in our buffer.
1131 */
1132 mask->num_items_tools = (req->num_items > MAX_ITEMS_ALLOWED) ?
1133 MAX_ITEMS_ALLOWED : req->num_items;
1134 mask_size = LOG_ITEMS_TO_SIZE(mask->num_items_tools);
1135 memset(mask->ptr, 0, mask->range_tools);
1136 if (mask_size > mask->range_tools) {
1137 DIAG_LOG(DIAG_DEBUG_MASKS,
1138 "log range mismatch, e: %d old: %d new: %d\n",
1139 req->equip_id, mask->range_tools,
1140 LOG_ITEMS_TO_SIZE(mask->num_items_tools));
1141 /* Change in the mask reported by tools */
1142 temp_buf = krealloc(mask->ptr, mask_size, GFP_KERNEL);
1143 if (!temp_buf) {
1144 mask_info->status = DIAG_CTRL_MASK_INVALID;
1145 mutex_unlock(&mask->lock);
1146 break;
1147 }
1148 mask->ptr = temp_buf;
1149 memset(mask->ptr, 0, mask_size);
1150 mask->range_tools = mask_size;
1151 }
1152 req->num_items = mask->num_items_tools;
1153 if (mask_size > 0)
1154 memcpy(mask->ptr, src_buf + read_len, mask_size);
1155 DIAG_LOG(DIAG_DEBUG_MASKS,
1156 "copying log mask, e %d num %d range %d size %d\n",
1157 req->equip_id, mask->num_items_tools,
1158 mask->range_tools, mask_size);
1159 mutex_unlock(&mask->lock);
1160 mask_info->status = DIAG_CTRL_MASK_VALID;
1161 break;
1162 }
1163 mutex_unlock(&mask_info->lock);
1164 if (diag_check_update(APPS_DATA))
1165 diag_update_userspace_clients(LOG_MASKS_TYPE);
1166
1167 /*
1168 * Apps processor must send the response to this command. Frame the
1169 * response.
1170 */
1171 payload_len = LOG_ITEMS_TO_SIZE(req->num_items);
1172 if ((payload_len + rsp_header_len > dest_len) || (payload_len == 0)) {
1173 pr_err("diag: In %s, invalid length, payload_len: %d, header_len: %d, dest_len: %d\n",
1174 __func__, payload_len, rsp_header_len, dest_len);
1175 status = LOG_STATUS_FAIL;
1176 }
1177 rsp.cmd_code = DIAG_CMD_LOG_CONFIG;
1178 rsp.padding[0] = 0;
1179 rsp.padding[1] = 0;
1180 rsp.padding[2] = 0;
1181 rsp.sub_cmd = DIAG_CMD_OP_SET_LOG_MASK;
1182 rsp.status = status;
1183 rsp.equip_id = req->equip_id;
1184 rsp.num_items = req->num_items;
1185 memcpy(dest_buf, &rsp, rsp_header_len);
1186 write_len += rsp_header_len;
1187 if (status != LOG_STATUS_SUCCESS)
1188 goto end;
1189 memcpy(dest_buf + write_len, src_buf + read_len, payload_len);
1190 write_len += payload_len;
1191
1192 for (i = 0; i < NUM_PERIPHERALS; i++) {
1193 if (!diag_check_update(i))
1194 continue;
1195 diag_send_log_mask_update(i, req->equip_id);
1196 }
1197end:
1198 return write_len;
1199}
1200
1201static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
1202 unsigned char *dest_buf, int dest_len,
1203 struct diag_md_session_t *info)
1204{
1205 struct diag_mask_info *mask_info = NULL;
1206 struct diag_log_mask_t *mask = NULL;
1207 struct diag_log_config_rsp_t header;
1208 int write_len = 0;
1209 int i;
1210
1211 mask_info = (!info) ? &log_mask : info->log_mask;
1212 if (!src_buf || !dest_buf || src_len <= 0 || dest_len <= 0 ||
1213 !mask_info) {
1214 pr_err("diag: Invalid input in %s, src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d, mask_info: %pK\n",
1215 __func__, src_buf, src_len, dest_buf, dest_len,
1216 mask_info);
1217 return -EINVAL;
1218 }
1219
1220 mask = (struct diag_log_mask_t *)mask_info->ptr;
1221
1222 for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
1223 mutex_lock(&mask->lock);
1224 memset(mask->ptr, 0, mask->range);
1225 mutex_unlock(&mask->lock);
1226 }
1227 mask_info->status = DIAG_CTRL_MASK_ALL_DISABLED;
1228 if (diag_check_update(APPS_DATA))
1229 diag_update_userspace_clients(LOG_MASKS_TYPE);
1230
1231 /*
1232 * Apps processor must send the response to this command. Frame the
1233 * response.
1234 */
1235 header.cmd_code = DIAG_CMD_LOG_CONFIG;
1236 header.padding[0] = 0;
1237 header.padding[1] = 0;
1238 header.padding[2] = 0;
1239 header.sub_cmd = DIAG_CMD_OP_LOG_DISABLE;
1240 header.status = LOG_STATUS_SUCCESS;
1241 memcpy(dest_buf, &header, sizeof(struct diag_log_config_rsp_t));
1242 write_len += sizeof(struct diag_log_config_rsp_t);
1243 for (i = 0; i < NUM_PERIPHERALS; i++) {
1244 if (!diag_check_update(i))
1245 continue;
1246 diag_send_log_mask_update(i, ALL_EQUIP_ID);
1247 }
1248
1249 return write_len;
1250}
1251
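/*
 * Fill one msg mask table entry for the given SSID range and allocate a
 * zeroed mask array of at least MAX_SSID_PER_RANGE entries.
 */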
1252int diag_create_msg_mask_table_entry(struct diag_msg_mask_t *msg_mask,
1253 struct diag_ssid_range_t *range)
1254{
1255 if (!msg_mask || !range)
1256 return -EIO;
1257 if (range->ssid_last < range->ssid_first)
1258 return -EINVAL;
1259 msg_mask->ssid_first = range->ssid_first;
1260 msg_mask->ssid_last = range->ssid_last;
1261 msg_mask->ssid_last_tools = range->ssid_last;
1262 msg_mask->range = msg_mask->ssid_last - msg_mask->ssid_first + 1;
1263 if (msg_mask->range < MAX_SSID_PER_RANGE)
1264 msg_mask->range = MAX_SSID_PER_RANGE;
1265 msg_mask->range_tools = msg_mask->range;
1266 mutex_init(&msg_mask->lock);
1267 if (msg_mask->range > 0) {
1268 msg_mask->ptr = kcalloc(msg_mask->range, sizeof(uint32_t),
1269 GFP_KERNEL);
1270 if (!msg_mask->ptr)
1271 return -ENOMEM;
1272 kmemleak_not_leak(msg_mask->ptr);
1273 }
1274 return 0;
1275}
1276
1277static int diag_create_msg_mask_table(void)
1278{
1279 int i;
1280 int err = 0;
1281 struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
1282 struct diag_ssid_range_t range;
1283
1284 mutex_lock(&msg_mask.lock);
1285 driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
1286 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
1287 range.ssid_first = msg_mask_tbl[i].ssid_first;
1288 range.ssid_last = msg_mask_tbl[i].ssid_last;
1289 err = diag_create_msg_mask_table_entry(mask, &range);
1290 if (err)
1291 break;
1292 }
1293 mutex_unlock(&msg_mask.lock);
1294 return err;
1295}
1296
1297static int diag_create_build_time_mask(void)
1298{
1299 int i;
1300 int err = 0;
1301 const uint32_t *tbl = NULL;
1302 uint32_t tbl_size = 0;
1303 struct diag_msg_mask_t *build_mask = NULL;
1304 struct diag_ssid_range_t range;
1305
1306 mutex_lock(&msg_bt_mask.lock);
1307 build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
1308 for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
1309 range.ssid_first = msg_mask_tbl[i].ssid_first;
1310 range.ssid_last = msg_mask_tbl[i].ssid_last;
1311 err = diag_create_msg_mask_table_entry(build_mask, &range);
1312 if (err)
1313 break;
1314 switch (build_mask->ssid_first) {
1315 case MSG_SSID_0:
1316 tbl = msg_bld_masks_0;
1317 tbl_size = sizeof(msg_bld_masks_0);
1318 break;
1319 case MSG_SSID_1:
1320 tbl = msg_bld_masks_1;
1321 tbl_size = sizeof(msg_bld_masks_1);
1322 break;
1323 case MSG_SSID_2:
1324 tbl = msg_bld_masks_2;
1325 tbl_size = sizeof(msg_bld_masks_2);
1326 break;
1327 case MSG_SSID_3:
1328 tbl = msg_bld_masks_3;
1329 tbl_size = sizeof(msg_bld_masks_3);
1330 break;
1331 case MSG_SSID_4:
1332 tbl = msg_bld_masks_4;
1333 tbl_size = sizeof(msg_bld_masks_4);
1334 break;
1335 case MSG_SSID_5:
1336 tbl = msg_bld_masks_5;
1337 tbl_size = sizeof(msg_bld_masks_5);
1338 break;
1339 case MSG_SSID_6:
1340 tbl = msg_bld_masks_6;
1341 tbl_size = sizeof(msg_bld_masks_6);
1342 break;
1343 case MSG_SSID_7:
1344 tbl = msg_bld_masks_7;
1345 tbl_size = sizeof(msg_bld_masks_7);
1346 break;
1347 case MSG_SSID_8:
1348 tbl = msg_bld_masks_8;
1349 tbl_size = sizeof(msg_bld_masks_8);
1350 break;
1351 case MSG_SSID_9:
1352 tbl = msg_bld_masks_9;
1353 tbl_size = sizeof(msg_bld_masks_9);
1354 break;
1355 case MSG_SSID_10:
1356 tbl = msg_bld_masks_10;
1357 tbl_size = sizeof(msg_bld_masks_10);
1358 break;
1359 case MSG_SSID_11:
1360 tbl = msg_bld_masks_11;
1361 tbl_size = sizeof(msg_bld_masks_11);
1362 break;
1363 case MSG_SSID_12:
1364 tbl = msg_bld_masks_12;
1365 tbl_size = sizeof(msg_bld_masks_12);
1366 break;
1367 case MSG_SSID_13:
1368 tbl = msg_bld_masks_13;
1369 tbl_size = sizeof(msg_bld_masks_13);
1370 break;
1371 case MSG_SSID_14:
1372 tbl = msg_bld_masks_14;
1373 tbl_size = sizeof(msg_bld_masks_14);
1374 break;
1375 case MSG_SSID_15:
1376 tbl = msg_bld_masks_15;
1377 tbl_size = sizeof(msg_bld_masks_15);
1378 break;
1379 case MSG_SSID_16:
1380 tbl = msg_bld_masks_16;
1381 tbl_size = sizeof(msg_bld_masks_16);
1382 break;
1383 case MSG_SSID_17:
1384 tbl = msg_bld_masks_17;
1385 tbl_size = sizeof(msg_bld_masks_17);
1386 break;
1387 case MSG_SSID_18:
1388 tbl = msg_bld_masks_18;
1389 tbl_size = sizeof(msg_bld_masks_18);
1390 break;
1391 case MSG_SSID_19:
1392 tbl = msg_bld_masks_19;
1393 tbl_size = sizeof(msg_bld_masks_19);
1394 break;
1395 case MSG_SSID_20:
1396 tbl = msg_bld_masks_20;
1397 tbl_size = sizeof(msg_bld_masks_20);
1398 break;
1399 case MSG_SSID_21:
1400 tbl = msg_bld_masks_21;
1401 tbl_size = sizeof(msg_bld_masks_21);
1402 break;
1403 case MSG_SSID_22:
1404 tbl = msg_bld_masks_22;
1405 tbl_size = sizeof(msg_bld_masks_22);
1406 break;
1407 }
1408 if (!tbl)
1409 continue;
1410 if (tbl_size > build_mask->range * sizeof(uint32_t)) {
1411 pr_warn("diag: In %s, table %d has more ssid than max, ssid_first: %d, ssid_last: %d\n",
1412 __func__, i, build_mask->ssid_first,
1413 build_mask->ssid_last);
1414 tbl_size = build_mask->range * sizeof(uint32_t);
1415 }
1416 memcpy(build_mask->ptr, tbl, tbl_size);
1417 }
1418 mutex_unlock(&msg_bt_mask.lock);
1419
1420 return err;
1421}
1422
1423static int diag_create_log_mask_table(void)
1424{
1425 struct diag_log_mask_t *mask = NULL;
1426 uint8_t i;
1427 int err = 0;
1428
1429 mutex_lock(&log_mask.lock);
1430 mask = (struct diag_log_mask_t *)(log_mask.ptr);
1431 for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
1432 mask->equip_id = i;
1433 mask->num_items = LOG_GET_ITEM_NUM(log_code_last_tbl[i]);
1434 mask->num_items_tools = mask->num_items;
1435 mutex_init(&mask->lock);
1436 if (LOG_ITEMS_TO_SIZE(mask->num_items) > MAX_ITEMS_PER_EQUIP_ID)
1437 mask->range = LOG_ITEMS_TO_SIZE(mask->num_items);
1438 else
1439 mask->range = MAX_ITEMS_PER_EQUIP_ID;
1440 mask->range_tools = mask->range;
1441 mask->ptr = kzalloc(mask->range, GFP_KERNEL);
1442 if (!mask->ptr) {
1443 err = -ENOMEM;
1444 break;
1445 }
1446 kmemleak_not_leak(mask->ptr);
1447 }
1448 mutex_unlock(&log_mask.lock);
1449 return err;
1450}
1451
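/*
 * Common init: allocate the mask storage (mask_len bytes) and the update
 * buffer (update_buf_len bytes), mark the mask invalid and init its lock.
 */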
1452static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
1453 int update_buf_len)
1454{
1455 if (!mask_info || mask_len < 0 || update_buf_len < 0)
1456 return -EINVAL;
1457
1458 mask_info->status = DIAG_CTRL_MASK_INVALID;
1459 mask_info->mask_len = mask_len;
1460 mask_info->update_buf_len = update_buf_len;
1461 if (mask_len > 0) {
1462 mask_info->ptr = kzalloc(mask_len, GFP_KERNEL);
1463 if (!mask_info->ptr)
1464 return -ENOMEM;
1465 kmemleak_not_leak(mask_info->ptr);
1466 }
1467 if (update_buf_len > 0) {
1468 mask_info->update_buf = kzalloc(update_buf_len, GFP_KERNEL);
1469 if (!mask_info->update_buf) {
1470 kfree(mask_info->ptr);
1471 return -ENOMEM;
1472 }
1473 kmemleak_not_leak(mask_info->update_buf);
1474 }
1475 mutex_init(&mask_info->lock);
1476 return 0;
1477}
1478
1479static void __diag_mask_exit(struct diag_mask_info *mask_info)
1480{
1481 if (!mask_info)
1482 return;
1483
1484 mutex_lock(&mask_info->lock);
1485 kfree(mask_info->ptr);
1486 mask_info->ptr = NULL;
1487 kfree(mask_info->update_buf);
1488 mask_info->update_buf = NULL;
1489 mutex_unlock(&mask_info->lock);
1490}
1491
1492int diag_log_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
1493{
1494 int i;
1495 int err = 0;
1496 struct diag_log_mask_t *src_mask = NULL;
1497 struct diag_log_mask_t *dest_mask = NULL;
1498
1499 if (!src)
1500 return -EINVAL;
1501
1502 err = __diag_mask_init(dest, LOG_MASK_SIZE, APPS_BUF_SIZE);
1503 if (err)
1504 return err;
1505
1506 mutex_lock(&dest->lock);
1507 src_mask = (struct diag_log_mask_t *)(src->ptr);
1508 dest_mask = (struct diag_log_mask_t *)(dest->ptr);
1509
1510 dest->mask_len = src->mask_len;
1511 dest->status = src->status;
1512
1513 for (i = 0; i < MAX_EQUIP_ID; i++, src_mask++, dest_mask++) {
1514 dest_mask->equip_id = src_mask->equip_id;
1515 dest_mask->num_items = src_mask->num_items;
1516 dest_mask->num_items_tools = src_mask->num_items_tools;
1517 mutex_init(&dest_mask->lock);
1518 dest_mask->range = src_mask->range;
1519 dest_mask->range_tools = src_mask->range_tools;
1520 dest_mask->ptr = kzalloc(dest_mask->range_tools, GFP_KERNEL);
1521 if (!dest_mask->ptr) {
1522 err = -ENOMEM;
1523 break;
1524 }
1525 kmemleak_not_leak(dest_mask->ptr);
1526 memcpy(dest_mask->ptr, src_mask->ptr, dest_mask->range_tools);
1527 }
1528 mutex_unlock(&dest->lock);
1529
1530 return err;
1531}
1532
1533void diag_log_mask_free(struct diag_mask_info *mask_info)
1534{
1535 int i;
1536 struct diag_log_mask_t *mask = NULL;
1537
1538 if (!mask_info)
1539 return;
1540
1541 mutex_lock(&mask_info->lock);
1542 mask = (struct diag_log_mask_t *)mask_info->ptr;
1543 for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
1544 kfree(mask->ptr);
1545 mask->ptr = NULL;
1546 }
1547 mutex_unlock(&mask_info->lock);
1548
1549 __diag_mask_exit(mask_info);
1550
1551}
1552
1553static int diag_msg_mask_init(void)
1554{
1555 int err = 0;
1556 int i;
1557
1558 err = __diag_mask_init(&msg_mask, MSG_MASK_SIZE, APPS_BUF_SIZE);
1559 if (err)
1560 return err;
1561 err = diag_create_msg_mask_table();
1562 if (err) {
1563 pr_err("diag: Unable to create msg masks, err: %d\n", err);
1564 return err;
1565 }
1566 driver->msg_mask = &msg_mask;
1567
1568 for (i = 0; i < NUM_PERIPHERALS; i++)
1569 driver->max_ssid_count[i] = 0;
1570
1571 return 0;
1572}
1573
1574int diag_msg_mask_copy(struct diag_mask_info *dest, struct diag_mask_info *src)
1575{
1576 int i;
1577 int err = 0;
1578 struct diag_msg_mask_t *src_mask = NULL;
1579 struct diag_msg_mask_t *dest_mask = NULL;
1580 struct diag_ssid_range_t range;
1581
1582 if (!src || !dest)
1583 return -EINVAL;
1584
1585 err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
1586 if (err)
1587 return err;
1588
1589 mutex_lock(&dest->lock);
1590 src_mask = (struct diag_msg_mask_t *)src->ptr;
1591 dest_mask = (struct diag_msg_mask_t *)dest->ptr;
1592
1593 dest->mask_len = src->mask_len;
1594 dest->status = src->status;
1595 for (i = 0; i < driver->msg_mask_tbl_count; i++) {
1596 range.ssid_first = src_mask->ssid_first;
1597 range.ssid_last = src_mask->ssid_last;
1598 err = diag_create_msg_mask_table_entry(dest_mask, &range);
1599 if (err)
1600 break;
1601 memcpy(dest_mask->ptr, src_mask->ptr,
1602 dest_mask->range * sizeof(uint32_t));
1603 src_mask++;
1604 dest_mask++;
1605 }
1606 mutex_unlock(&dest->lock);
1607
1608 return err;
1609}
1610
1611void diag_msg_mask_free(struct diag_mask_info *mask_info)
1612{
1613 int i;
1614 struct diag_msg_mask_t *mask = NULL;
1615
1616 if (!mask_info)
1617 return;
1618
1619 mutex_lock(&mask_info->lock);
1620 mask = (struct diag_msg_mask_t *)mask_info->ptr;
1621 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
1622 kfree(mask->ptr);
1623 mask->ptr = NULL;
1624 }
1625 mutex_unlock(&mask_info->lock);
1626
1627 __diag_mask_exit(mask_info);
1628}
1629
1630static void diag_msg_mask_exit(void)
1631{
1632 int i;
1633 struct diag_msg_mask_t *mask = NULL;
1634
1635 mask = (struct diag_msg_mask_t *)(msg_mask.ptr);
1636 if (mask) {
1637 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
1638 kfree(mask->ptr);
1639 kfree(msg_mask.ptr);
1640 }
1641
1642 kfree(msg_mask.update_buf);
1643}
1644
1645static int diag_build_time_mask_init(void)
1646{
1647 int err = 0;
1648
1649 /* There is no need for update buffer for Build Time masks */
1650 err = __diag_mask_init(&msg_bt_mask, MSG_MASK_SIZE, 0);
1651 if (err)
1652 return err;
1653 err = diag_create_build_time_mask();
1654 if (err) {
1655 pr_err("diag: Unable to create msg build time masks, err: %d\n",
1656 err);
1657 return err;
1658 }
1659 driver->build_time_mask = &msg_bt_mask;
1660 return 0;
1661}
1662
1663static void diag_build_time_mask_exit(void)
1664{
1665 int i;
1666 struct diag_msg_mask_t *mask = NULL;
1667
1668 mask = (struct diag_msg_mask_t *)(msg_bt_mask.ptr);
1669 if (mask) {
1670 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++)
1671 kfree(mask->ptr);
 1672 kfree(msg_bt_mask.ptr);
1673 }
1674}
1675
1676static int diag_log_mask_init(void)
1677{
1678 int err = 0;
1679 int i;
1680
1681 err = __diag_mask_init(&log_mask, LOG_MASK_SIZE, APPS_BUF_SIZE);
1682 if (err)
1683 return err;
1684 err = diag_create_log_mask_table();
1685 if (err)
1686 return err;
1687 driver->log_mask = &log_mask;
1688
1689 for (i = 0; i < NUM_PERIPHERALS; i++)
1690 driver->num_equip_id[i] = 0;
1691
1692 return 0;
1693}
1694
1695static void diag_log_mask_exit(void)
1696{
1697 int i;
1698 struct diag_log_mask_t *mask = NULL;
1699
1700 mask = (struct diag_log_mask_t *)(log_mask.ptr);
1701 if (mask) {
1702 for (i = 0; i < MAX_EQUIP_ID; i++, mask++)
1703 kfree(mask->ptr);
1704 kfree(log_mask.ptr);
1705 }
1706
1707 kfree(log_mask.update_buf);
1708}
1709
1710static int diag_event_mask_init(void)
1711{
1712 int err = 0;
1713 int i;
1714
1715 err = __diag_mask_init(&event_mask, EVENT_MASK_SIZE, APPS_BUF_SIZE);
1716 if (err)
1717 return err;
1718 driver->event_mask_size = EVENT_MASK_SIZE;
1719 driver->last_event_id = APPS_EVENT_LAST_ID;
1720 driver->event_mask = &event_mask;
1721
1722 for (i = 0; i < NUM_PERIPHERALS; i++)
1723 driver->num_event_id[i] = 0;
1724
1725 return 0;
1726}
1727
1728int diag_event_mask_copy(struct diag_mask_info *dest,
1729 struct diag_mask_info *src)
1730{
1731 int err = 0;
1732
1733 if (!src || !dest)
1734 return -EINVAL;
1735
1736 err = __diag_mask_init(dest, EVENT_MASK_SIZE, APPS_BUF_SIZE);
1737 if (err)
1738 return err;
1739
1740 mutex_lock(&dest->lock);
1741 dest->mask_len = src->mask_len;
1742 dest->status = src->status;
1743 memcpy(dest->ptr, src->ptr, dest->mask_len);
1744 mutex_unlock(&dest->lock);
1745
1746 return err;
1747}
1748
1749void diag_event_mask_free(struct diag_mask_info *mask_info)
1750{
1751 if (!mask_info)
1752 return;
1753
1754 __diag_mask_exit(mask_info);
1755}
1756
1757static void diag_event_mask_exit(void)
1758{
1759 kfree(event_mask.ptr);
1760 kfree(event_mask.update_buf);
1761}
1762
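/*
 * Copy every msg mask range to user space as a diag_msg_mask_userspace_t
 * header followed by the mask words; returns bytes copied or an error.
 */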
1763int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
1764 struct diag_md_session_t *info)
1765{
1766 int i;
1767 int err = 0;
1768 int len = 0;
1769 int copy_len = 0;
1770 int total_len = 0;
1771 struct diag_msg_mask_userspace_t header;
1772 struct diag_mask_info *mask_info = NULL;
1773 struct diag_msg_mask_t *mask = NULL;
1774 unsigned char *ptr = NULL;
1775
1776 if (!buf || count == 0)
1777 return -EINVAL;
1778
1779 mask_info = (!info) ? &msg_mask : info->msg_mask;
1780 if (!mask_info)
1781 return -EIO;
1782
1783 mutex_lock(&driver->diag_maskclear_mutex);
1784 if (driver->mask_clear) {
1785 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
1786 "diag:%s: count = %zu\n", __func__, count);
1787 mutex_unlock(&driver->diag_maskclear_mutex);
1788 return -EIO;
1789 }
1790 mutex_unlock(&driver->diag_maskclear_mutex);
1791
1792 mutex_lock(&mask_info->lock);
1793 mask = (struct diag_msg_mask_t *)(mask_info->ptr);
1794 for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
1795 ptr = mask_info->update_buf;
1796 len = 0;
1797 mutex_lock(&mask->lock);
1798 header.ssid_first = mask->ssid_first;
1799 header.ssid_last = mask->ssid_last_tools;
1800 header.range = mask->range_tools;
1801 memcpy(ptr, &header, sizeof(header));
1802 len += sizeof(header);
1803 copy_len = (sizeof(uint32_t) * mask->range_tools);
1804 if ((len + copy_len) > mask_info->update_buf_len) {
1805 pr_err("diag: In %s, no space to update msg mask, first: %d, last: %d\n",
1806 __func__, mask->ssid_first,
1807 mask->ssid_last_tools);
1808 mutex_unlock(&mask->lock);
1809 continue;
1810 }
1811 memcpy(ptr + len, mask->ptr, copy_len);
1812 len += copy_len;
1813 mutex_unlock(&mask->lock);
1814 /* + sizeof(int) to account for data_type already in buf */
1815 if (total_len + sizeof(int) + len > count) {
1816 pr_err("diag: In %s, unable to send msg masks to user space, total_len: %d, count: %zu\n",
1817 __func__, total_len, count);
1818 err = -ENOMEM;
1819 break;
1820 }
 1821 if (copy_to_user(buf + total_len, (void *)ptr, len)) {
 1822 /* copy_to_user returns bytes not copied, not an errno */
 err = -EFAULT;
1823 pr_err("diag: In %s Unable to send msg masks to user space clients, err: %d\n",
1824 __func__, err);
1825 break;
1826 }
1827 total_len += len;
1828 }
1829 mutex_unlock(&mask_info->lock);
1830
1831 return err ? err : total_len;
1832}
1833
1834int diag_copy_to_user_log_mask(char __user *buf, size_t count,
1835 struct diag_md_session_t *info)
1836{
1837 int i;
1838 int err = 0;
1839 int len = 0;
1840 int copy_len = 0;
1841 int total_len = 0;
1842 struct diag_log_mask_userspace_t header;
1843 struct diag_log_mask_t *mask = NULL;
1844 struct diag_mask_info *mask_info = NULL;
1845 unsigned char *ptr = NULL;
1846
1847 if (!buf || count == 0)
1848 return -EINVAL;
1849
1850 mask_info = (!info) ? &log_mask : info->log_mask;
1851 if (!mask_info)
1852 return -EIO;
1853
1854 mutex_lock(&mask_info->lock);
1855 mask = (struct diag_log_mask_t *)(mask_info->ptr);
1856 for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
1857 ptr = mask_info->update_buf;
1858 len = 0;
1859 mutex_lock(&mask->lock);
1860 header.equip_id = mask->equip_id;
1861 header.num_items = mask->num_items_tools;
1862 memcpy(ptr, &header, sizeof(header));
1863 len += sizeof(header);
1864 copy_len = LOG_ITEMS_TO_SIZE(header.num_items);
1865 if ((len + copy_len) > mask_info->update_buf_len) {
1866 pr_err("diag: In %s, no space to update log mask, equip_id: %d\n",
1867 __func__, mask->equip_id);
1868 mutex_unlock(&mask->lock);
1869 continue;
1870 }
1871 memcpy(ptr + len, mask->ptr, copy_len);
1872 len += copy_len;
1873 mutex_unlock(&mask->lock);
1874 /* + sizeof(int) to account for data_type already in buf */
1875 if (total_len + sizeof(int) + len > count) {
1876 pr_err("diag: In %s, unable to send log masks to user space, total_len: %d, count: %zu\n",
1877 __func__, total_len, count);
1878 err = -ENOMEM;
1879 break;
1880 }
 1881 if (copy_to_user(buf + total_len, (void *)ptr, len)) {
 1882 /* copy_to_user returns bytes not copied, not an errno */
 err = -EFAULT;
1883 pr_err("diag: In %s Unable to send log masks to user space clients, err: %d\n",
1884 __func__, err);
1885 break;
1886 }
1887 total_len += len;
1888 }
1889 mutex_unlock(&mask_info->lock);
1890
1891 return err ? err : total_len;
1892}
1893
1894void diag_send_updates_peripheral(uint8_t peripheral)
1895{
1896 diag_send_feature_mask_update(peripheral);
1897 if (driver->time_sync_enabled)
1898 diag_send_time_sync_update(peripheral);
1899 diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
1900 diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
1901 diag_send_event_mask_update(peripheral);
1902 diag_send_real_time_update(peripheral,
1903 driver->real_time_mode[DIAG_LOCAL_PROC]);
1904 diag_send_peripheral_buffering_mode(
1905 &driver->buffering_mode[peripheral]);
1906}
1907
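/*
 * Dispatch an apps mask command to its handler; the handler writes its
 * response into driver->apps_rsp_buf and returns the response length.
 */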
1908int diag_process_apps_masks(unsigned char *buf, int len,
1909 struct diag_md_session_t *info)
1910{
1911 int size = 0;
1912 int sub_cmd = 0;
1913 int (*hdlr)(unsigned char *src_buf, int src_len,
1914 unsigned char *dest_buf, int dest_len,
1915 struct diag_md_session_t *info) = NULL;
1916
1917 if (!buf || len <= 0)
1918 return -EINVAL;
1919
1920 if (*buf == DIAG_CMD_LOG_CONFIG) {
1921 sub_cmd = *(int *)(buf + sizeof(int));
1922 switch (sub_cmd) {
1923 case DIAG_CMD_OP_LOG_DISABLE:
1924 hdlr = diag_cmd_disable_log_mask;
1925 break;
1926 case DIAG_CMD_OP_GET_LOG_RANGE:
1927 hdlr = diag_cmd_get_log_range;
1928 break;
1929 case DIAG_CMD_OP_SET_LOG_MASK:
1930 hdlr = diag_cmd_set_log_mask;
1931 break;
1932 case DIAG_CMD_OP_GET_LOG_MASK:
1933 hdlr = diag_cmd_get_log_mask;
1934 break;
1935 }
1936 } else if (*buf == DIAG_CMD_MSG_CONFIG) {
1937 sub_cmd = *(uint8_t *)(buf + sizeof(uint8_t));
1938 switch (sub_cmd) {
1939 case DIAG_CMD_OP_GET_SSID_RANGE:
1940 hdlr = diag_cmd_get_ssid_range;
1941 break;
1942 case DIAG_CMD_OP_GET_BUILD_MASK:
1943 hdlr = diag_cmd_get_build_mask;
1944 break;
1945 case DIAG_CMD_OP_GET_MSG_MASK:
1946 hdlr = diag_cmd_get_msg_mask;
1947 break;
1948 case DIAG_CMD_OP_SET_MSG_MASK:
1949 hdlr = diag_cmd_set_msg_mask;
1950 break;
1951 case DIAG_CMD_OP_SET_ALL_MSG_MASK:
1952 hdlr = diag_cmd_set_all_msg_mask;
1953 break;
1954 }
1955 } else if (*buf == DIAG_CMD_GET_EVENT_MASK) {
1956 hdlr = diag_cmd_get_event_mask;
1957 } else if (*buf == DIAG_CMD_SET_EVENT_MASK) {
1958 hdlr = diag_cmd_update_event_mask;
1959 } else if (*buf == DIAG_CMD_EVENT_TOGGLE) {
1960 hdlr = diag_cmd_toggle_events;
1961 }
1962
1963 if (hdlr)
1964 size = hdlr(buf, len, driver->apps_rsp_buf,
1965 DIAG_MAX_RSP_SIZE, info);
1966
1967 return (size > 0) ? size : 0;
1968}
1969
1970int diag_masks_init(void)
1971{
1972 int err = 0;
1973
1974 err = diag_msg_mask_init();
1975 if (err)
1976 goto fail;
1977
1978 err = diag_build_time_mask_init();
1979 if (err)
1980 goto fail;
1981
1982 err = diag_log_mask_init();
1983 if (err)
1984 goto fail;
1985
1986 err = diag_event_mask_init();
1987 if (err)
1988 goto fail;
1989
1990 if (driver->buf_feature_mask_update == NULL) {
1991 driver->buf_feature_mask_update = kzalloc(sizeof(
1992 struct diag_ctrl_feature_mask) +
1993 FEATURE_MASK_LEN, GFP_KERNEL);
1994 if (driver->buf_feature_mask_update == NULL)
1995 goto fail;
1996 kmemleak_not_leak(driver->buf_feature_mask_update);
1997 }
1998
1999 return 0;
2000fail:
2001 pr_err("diag: Could not initialize diag mask buffers\n");
2002 diag_masks_exit();
2003 return -ENOMEM;
2004}
2005
2006void diag_masks_exit(void)
2007{
2008 diag_msg_mask_exit();
2009 diag_build_time_mask_exit();
2010 diag_log_mask_exit();
2011 diag_event_mask_exit();
2012 kfree(driver->buf_feature_mask_update);
2013}