/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
12
13#include <linux/slab.h>
14#include <linux/diagchar.h>
15#include <linux/kmemleak.h>
16#include <linux/delay.h>
17#include "diagchar.h"
18#include "diagfwd.h"
19#include "diagfwd_cntl.h"
20#include "diagfwd_peripheral.h"
21#include "diagfwd_bridge.h"
22#include "diag_dci.h"
23#include "diagmem.h"
24#include "diag_masks.h"
25#include "diag_ipc_logging.h"
26#include "diag_mux.h"
27
28#define FEATURE_SUPPORTED(x) ((feature_mask << (i * 8)) & (1 << x))
29
30/* tracks which peripheral is undergoing SSR */
31static uint16_t reg_dirty;
32static void diag_notify_md_client(uint8_t peripheral, int data);
33
34static void diag_mask_update_work_fn(struct work_struct *work)
35{
36 uint8_t peripheral;
37
38 for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
39 if (!(driver->mask_update & PERIPHERAL_MASK(peripheral)))
40 continue;
41 mutex_lock(&driver->cntl_lock);
42 driver->mask_update ^= PERIPHERAL_MASK(peripheral);
43 mutex_unlock(&driver->cntl_lock);
44 diag_send_updates_peripheral(peripheral);
45 }
46}
47
48void diag_cntl_channel_open(struct diagfwd_info *p_info)
49{
50 if (!p_info)
51 return;
52 driver->mask_update |= PERIPHERAL_MASK(p_info->peripheral);
53 queue_work(driver->cntl_wq, &driver->mask_update_work);
54 diag_notify_md_client(p_info->peripheral, DIAG_STATUS_OPEN);
55}
56
/*
 * Called when the control channel to a peripheral closes (e.g. on SSR).
 * Resets the per-peripheral feature and STM state, purges its command
 * registrations, and notifies any memory device client it went away.
 */
void diag_cntl_channel_close(struct diagfwd_info *p_info)
{
	uint8_t peripheral;

	if (!p_info)
		return;

	peripheral = p_info->peripheral;
	if (peripheral >= NUM_PERIPHERALS)
		return;

	driver->feature[peripheral].sent_feature_mask = 0;
	driver->feature[peripheral].rcvd_feature_mask = 0;
	/* Let any queued mask/STM work drain before tearing down state */
	flush_workqueue(driver->cntl_wq);
	/* Mark the peripheral dirty so incoming registrations are dropped
	 * while its table is being purged (see diag_cntl_process_read_data)
	 */
	reg_dirty |= PERIPHERAL_MASK(peripheral);
	diag_cmd_remove_reg_by_proc(peripheral);
	driver->feature[peripheral].stm_support = DISABLE_STM;
	driver->feature[peripheral].log_on_demand = 0;
	driver->stm_state[peripheral] = DISABLE_STM;
	driver->stm_state_requested[peripheral] = DISABLE_STM;
	/* Purge complete: clear the dirty bit set above */
	reg_dirty ^= PERIPHERAL_MASK(peripheral);
	diag_notify_md_client(peripheral, DIAG_STATUS_CLOSED);
}
80
81static void diag_stm_update_work_fn(struct work_struct *work)
82{
83 uint8_t i;
84 uint16_t peripheral_mask = 0;
85 int err = 0;
86
87 mutex_lock(&driver->cntl_lock);
88 peripheral_mask = driver->stm_peripheral;
89 driver->stm_peripheral = 0;
90 mutex_unlock(&driver->cntl_lock);
91
92 if (peripheral_mask == 0)
93 return;
94
95 for (i = 0; i < NUM_PERIPHERALS; i++) {
96 if (!driver->feature[i].stm_support)
97 continue;
98 if (peripheral_mask & PERIPHERAL_MASK(i)) {
99 err = diag_send_stm_state(i,
100 (uint8_t)(driver->stm_state_requested[i]));
101 if (!err) {
102 driver->stm_state[i] =
103 driver->stm_state_requested[i];
104 }
105 }
106 }
107}
108
109void diag_notify_md_client(uint8_t peripheral, int data)
110{
111 int stat = 0;
112 struct siginfo info;
113
114 if (peripheral > NUM_PERIPHERALS)
115 return;
116
117 if (driver->logging_mode != DIAG_MEMORY_DEVICE_MODE)
118 return;
119
120 mutex_lock(&driver->md_session_lock);
121 memset(&info, 0, sizeof(struct siginfo));
122 info.si_code = SI_QUEUE;
123 info.si_int = (PERIPHERAL_MASK(peripheral) | data);
124 info.si_signo = SIGCONT;
125 if (driver->md_session_map[peripheral] &&
126 driver->md_session_map[peripheral]->task) {
127 if (driver->md_session_map[peripheral]->pid ==
128 driver->md_session_map[peripheral]->task->tgid) {
129 DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
130 "md_session %d pid = %d, md_session %d task tgid = %d\n",
131 peripheral,
132 driver->md_session_map[peripheral]->pid,
133 peripheral,
134 driver->md_session_map[peripheral]->task->tgid);
135 stat = send_sig_info(info.si_signo, &info,
136 driver->md_session_map[peripheral]->task);
137 if (stat)
138 pr_err("diag: Err sending signal to memory device client, signal data: 0x%x, stat: %d\n",
139 info.si_int, stat);
140 } else
141 pr_err("diag: md_session_map[%d] data is corrupted, signal data: 0x%x, stat: %d\n",
142 peripheral, info.si_int, stat);
143 }
144 mutex_unlock(&driver->md_session_lock);
145}
146
147static void process_pd_status(uint8_t *buf, uint32_t len,
148 uint8_t peripheral)
149{
150 struct diag_ctrl_msg_pd_status *pd_msg = NULL;
151 uint32_t pd;
152 int status = DIAG_STATUS_CLOSED;
153
154 if (!buf || peripheral >= NUM_PERIPHERALS || len < sizeof(*pd_msg))
155 return;
156
157 pd_msg = (struct diag_ctrl_msg_pd_status *)buf;
158 pd = pd_msg->pd_id;
159 status = (pd_msg->status == 0) ? DIAG_STATUS_OPEN : DIAG_STATUS_CLOSED;
160 diag_notify_md_client(peripheral, status);
161}
162
163static void enable_stm_feature(uint8_t peripheral)
164{
165 if (peripheral >= NUM_PERIPHERALS)
166 return;
167
168 mutex_lock(&driver->cntl_lock);
169 driver->feature[peripheral].stm_support = ENABLE_STM;
170 driver->stm_peripheral |= PERIPHERAL_MASK(peripheral);
171 mutex_unlock(&driver->cntl_lock);
172
173 queue_work(driver->cntl_wq, &(driver->stm_update_work));
174}
175
176static void enable_socket_feature(uint8_t peripheral)
177{
178 if (peripheral >= NUM_PERIPHERALS)
179 return;
180
181 if (driver->supports_sockets)
182 driver->feature[peripheral].sockets_enabled = 1;
183 else
184 driver->feature[peripheral].sockets_enabled = 0;
185}
186
187static void process_hdlc_encoding_feature(uint8_t peripheral)
188{
189 if (peripheral >= NUM_PERIPHERALS)
190 return;
191
192 if (driver->supports_apps_hdlc_encoding) {
193 driver->feature[peripheral].encode_hdlc =
194 ENABLE_APPS_HDLC_ENCODING;
195 } else {
196 driver->feature[peripheral].encode_hdlc =
197 DISABLE_APPS_HDLC_ENCODING;
198 }
199}
200
/*
 * Handle a DIAG_CTRL_MSG_DEREG packet: remove every command code range
 * listed by the peripheral from the apps command registration table.
 */
static void process_command_deregistration(uint8_t *buf, uint32_t len,
					   uint8_t peripheral)
{
	uint8_t *ptr = buf;
	int i;
	int header_len = sizeof(struct diag_ctrl_cmd_dereg);
	int read_len = 0;
	struct diag_ctrl_cmd_dereg *dereg = NULL;
	struct cmd_code_range *range = NULL;
	struct diag_cmd_reg_entry_t del_entry;

	/*
	 * Perform Basic sanity. The len field is the size of the data payload.
	 * This doesn't include the header size.
	 */
	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	dereg = (struct diag_ctrl_cmd_dereg *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	if (dereg->count_entries == 0) {
		pr_debug("diag: In %s, received reg tbl with no entries\n",
			 __func__);
		return;
	}

	for (i = 0; i < dereg->count_entries && read_len < len; i++) {
		range = (struct cmd_code_range *)ptr;
		/*
		 * Dereg entries carry only the hi/lo fields of
		 * cmd_code_range on the wire, hence the uint32_t
		 * adjustment when stepping to the next entry.
		 */
		ptr += sizeof(struct cmd_code_range) - sizeof(uint32_t);
		read_len += sizeof(struct cmd_code_range) - sizeof(uint32_t);
		del_entry.cmd_code = dereg->cmd_code;
		del_entry.subsys_id = dereg->subsysid;
		del_entry.cmd_code_hi = range->cmd_code_hi;
		del_entry.cmd_code_lo = range->cmd_code_lo;
		diag_cmd_remove_reg(&del_entry, peripheral);
	}

	/* Loop ended on read_len before consuming all advertised entries */
	if (i != dereg->count_entries) {
		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
		       __func__, read_len, len, dereg->count_entries);
	}
}
/*
 * Handle a DIAG_CTRL_MSG_REG packet: add every command code range
 * listed by the peripheral to the apps command registration table.
 */
static void process_command_registration(uint8_t *buf, uint32_t len,
					 uint8_t peripheral)
{
	uint8_t *ptr = buf;
	int i;
	int header_len = sizeof(struct diag_ctrl_cmd_reg);
	int read_len = 0;
	struct diag_ctrl_cmd_reg *reg = NULL;
	struct cmd_code_range *range = NULL;
	struct diag_cmd_reg_entry_t new_entry;

	/*
	 * Perform Basic sanity. The len field is the size of the data payload.
	 * This doesn't include the header size.
	 */
	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	reg = (struct diag_ctrl_cmd_reg *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	if (reg->count_entries == 0) {
		pr_debug("diag: In %s, received reg tbl with no entries\n",
			 __func__);
		return;
	}

	for (i = 0; i < reg->count_entries && read_len < len; i++) {
		range = (struct cmd_code_range *)ptr;
		ptr += sizeof(struct cmd_code_range);
		read_len += sizeof(struct cmd_code_range);
		new_entry.cmd_code = reg->cmd_code;
		new_entry.subsys_id = reg->subsysid;
		new_entry.cmd_code_hi = range->cmd_code_hi;
		new_entry.cmd_code_lo = range->cmd_code_lo;
		diag_cmd_add_reg(&new_entry, peripheral, INVALID_PID);
	}

	/* Loop ended on read_len before consuming all advertised entries */
	if (i != reg->count_entries) {
		pr_err("diag: In %s, reading less than available, read_len: %d, len: %d count: %d\n",
		       __func__, read_len, len, reg->count_entries);
	}
}
291
292static void diag_close_transport_work_fn(struct work_struct *work)
293{
294 uint8_t transport;
295 uint8_t peripheral;
296
297 mutex_lock(&driver->cntl_lock);
298 for (peripheral = 0; peripheral <= NUM_PERIPHERALS; peripheral++) {
299 if (!(driver->close_transport & PERIPHERAL_MASK(peripheral)))
300 continue;
301 driver->close_transport ^= PERIPHERAL_MASK(peripheral);
302 transport = driver->feature[peripheral].sockets_enabled ?
303 TRANSPORT_GLINK : TRANSPORT_SOCKET;
304 diagfwd_close_transport(transport, peripheral);
305 }
306 mutex_unlock(&driver->cntl_lock);
307}
308
309static void process_socket_feature(uint8_t peripheral)
310{
311 if (peripheral >= NUM_PERIPHERALS)
312 return;
313
314 mutex_lock(&driver->cntl_lock);
315 driver->close_transport |= PERIPHERAL_MASK(peripheral);
316 queue_work(driver->cntl_wq, &driver->close_transport_work);
317 mutex_unlock(&driver->cntl_lock);
318}
319
320static void process_log_on_demand_feature(uint8_t peripheral)
321{
322 /* Log On Demand command is registered only on Modem */
323 if (peripheral != PERIPHERAL_MODEM)
324 return;
325
326 if (driver->feature[PERIPHERAL_MODEM].log_on_demand)
327 driver->log_on_demand_support = 1;
328 else
329 driver->log_on_demand_support = 0;
330}
331
/*
 * Handle a DIAG_CTRL_MSG_FEATURE packet: copy the peripheral's feature
 * mask byte-by-byte and enable each advertised capability on the apps
 * side. Note the FEATURE_SUPPORTED() macro reads the loop index `i` to
 * locate the bit within the multi-byte mask.
 */
static void process_incoming_feature_mask(uint8_t *buf, uint32_t len,
					  uint8_t peripheral)
{
	int i;
	int header_len = sizeof(struct diag_ctrl_feature_mask);
	int read_len = 0;
	struct diag_ctrl_feature_mask *header = NULL;
	uint32_t feature_mask_len = 0;
	uint32_t feature_mask = 0;
	uint8_t *ptr = buf;

	if (!buf || peripheral >= NUM_PERIPHERALS || len == 0)
		return;

	header = (struct diag_ctrl_feature_mask *)ptr;
	ptr += header_len;
	feature_mask_len = header->feature_mask_len;

	if (feature_mask_len == 0) {
		pr_debug("diag: In %s, received invalid feature mask from peripheral %d\n",
			 __func__, peripheral);
		return;
	}

	/* Clamp to the number of mask bytes apps understands */
	if (feature_mask_len > FEATURE_MASK_LEN) {
		pr_alert("diag: Receiving feature mask length more than Apps support\n");
		feature_mask_len = FEATURE_MASK_LEN;
	}

	driver->feature[peripheral].rcvd_feature_mask = 1;

	for (i = 0; i < feature_mask_len && read_len < len; i++) {
		feature_mask = *(uint8_t *)ptr;
		driver->feature[peripheral].feature_mask[i] = feature_mask;
		ptr += sizeof(uint8_t);
		read_len += sizeof(uint8_t);

		if (FEATURE_SUPPORTED(F_DIAG_LOG_ON_DEMAND_APPS))
			driver->feature[peripheral].log_on_demand = 1;
		if (FEATURE_SUPPORTED(F_DIAG_REQ_RSP_SUPPORT))
			driver->feature[peripheral].separate_cmd_rsp = 1;
		if (FEATURE_SUPPORTED(F_DIAG_APPS_HDLC_ENCODE))
			process_hdlc_encoding_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_STM))
			enable_stm_feature(peripheral);
		if (FEATURE_SUPPORTED(F_DIAG_MASK_CENTRALIZATION))
			driver->feature[peripheral].mask_centralization = 1;
		if (FEATURE_SUPPORTED(F_DIAG_PERIPHERAL_BUFFERING))
			driver->feature[peripheral].peripheral_buffering = 1;
		if (FEATURE_SUPPORTED(F_DIAG_SOCKETS_ENABLED))
			enable_socket_feature(peripheral);
	}

	/* Post-processing that must run once per feature packet */
	process_socket_feature(peripheral);
	process_log_on_demand_feature(peripheral);
}
388
/*
 * Handle a DIAG_CTRL_MSG_LAST_EVENT_REPORT packet: record the highest
 * event id the peripheral generates and, if necessary, grow the apps
 * event mask buffer to cover it.
 */
static void process_last_event_report(uint8_t *buf, uint32_t len,
				      uint8_t peripheral)
{
	struct diag_ctrl_last_event_report *header = NULL;
	uint8_t *ptr = buf;
	uint8_t *temp = NULL;
	/* Payload is exactly event_last_id (u32) + stream id (u16) */
	uint32_t pkt_len = sizeof(uint32_t) + sizeof(uint16_t);
	uint16_t event_size = 0;

	if (!buf || peripheral >= NUM_PERIPHERALS || len != pkt_len)
		return;

	mutex_lock(&event_mask.lock);
	header = (struct diag_ctrl_last_event_report *)ptr;
	/* One mask bit per event id, rounded up to whole bytes */
	event_size = ((header->event_last_id / 8) + 1);
	if (event_size >= driver->event_mask_size) {
		pr_debug("diag: In %s, receiving event mask size more that Apps can handle\n",
			 __func__);
		/* krealloc into temp so the old mask survives a failure */
		temp = krealloc(driver->event_mask->ptr, event_size,
				GFP_KERNEL);
		if (!temp) {
			pr_err("diag: In %s, unable to reallocate event mask to support events from %d\n",
			       __func__, peripheral);
			goto err;
		}
		driver->event_mask->ptr = temp;
		driver->event_mask_size = event_size;
	}

	driver->num_event_id[peripheral] = header->event_last_id;
	if (header->event_last_id > driver->last_event_id)
		driver->last_event_id = header->event_last_id;
err:
	mutex_unlock(&event_mask.lock);
}
424
425static void process_log_range_report(uint8_t *buf, uint32_t len,
426 uint8_t peripheral)
427{
428 int i;
429 int read_len = 0;
430 int header_len = sizeof(struct diag_ctrl_log_range_report);
431 uint8_t *ptr = buf;
432 struct diag_ctrl_log_range_report *header = NULL;
433 struct diag_ctrl_log_range *log_range = NULL;
434 struct diag_log_mask_t *mask_ptr = NULL;
435
436 if (!buf || peripheral >= NUM_PERIPHERALS || len < 0)
437 return;
438
439 header = (struct diag_ctrl_log_range_report *)ptr;
440 ptr += header_len;
441 /* Don't account for pkt_id and length */
442 read_len += header_len - (2 * sizeof(uint32_t));
443
444 driver->num_equip_id[peripheral] = header->num_ranges;
445 for (i = 0; i < header->num_ranges && read_len < len; i++) {
446 log_range = (struct diag_ctrl_log_range *)ptr;
447 ptr += sizeof(struct diag_ctrl_log_range);
448 read_len += sizeof(struct diag_ctrl_log_range);
449
450 if (log_range->equip_id >= MAX_EQUIP_ID) {
451 pr_err("diag: receiving log equip id %d more than supported equip id: %d from peripheral: %d\n",
452 log_range->equip_id, MAX_EQUIP_ID, peripheral);
453 continue;
454 }
455 mask_ptr = (struct diag_log_mask_t *)log_mask.ptr;
456 mask_ptr = &mask_ptr[log_range->equip_id];
457
458 mutex_lock(&(mask_ptr->lock));
459 mask_ptr->num_items = log_range->num_items;
460 mask_ptr->range = LOG_ITEMS_TO_SIZE(log_range->num_items);
461 mutex_unlock(&(mask_ptr->lock));
462 }
463}
464
465static int update_msg_mask_tbl_entry(struct diag_msg_mask_t *mask,
466 struct diag_ssid_range_t *range)
467{
468 uint32_t temp_range;
469
470 if (!mask || !range)
471 return -EIO;
472 if (range->ssid_last < range->ssid_first) {
473 pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
474 __func__, range->ssid_first, range->ssid_last);
475 return -EINVAL;
476 }
477 if (range->ssid_last >= mask->ssid_last) {
478 temp_range = range->ssid_last - mask->ssid_first + 1;
479 mask->ssid_last = range->ssid_last;
480 mask->range = temp_range;
481 }
482
483 return 0;
484}
485
486static void process_ssid_range_report(uint8_t *buf, uint32_t len,
487 uint8_t peripheral)
488{
489 int i;
490 int j;
491 int read_len = 0;
492 int found = 0;
493 int new_size = 0;
494 int err = 0;
495 struct diag_ctrl_ssid_range_report *header = NULL;
496 struct diag_ssid_range_t *ssid_range = NULL;
497 int header_len = sizeof(struct diag_ctrl_ssid_range_report);
498 struct diag_msg_mask_t *mask_ptr = NULL;
499 uint8_t *ptr = buf;
500 uint8_t *temp = NULL;
501 uint32_t min_len = header_len - sizeof(struct diag_ctrl_pkt_header_t);
502
503 if (!buf || peripheral >= NUM_PERIPHERALS || len < min_len)
504 return;
505
506 header = (struct diag_ctrl_ssid_range_report *)ptr;
507 ptr += header_len;
508 /* Don't account for pkt_id and length */
509 read_len += header_len - (2 * sizeof(uint32_t));
510
511 driver->max_ssid_count[peripheral] = header->count;
512 for (i = 0; i < header->count && read_len < len; i++) {
513 ssid_range = (struct diag_ssid_range_t *)ptr;
514 ptr += sizeof(struct diag_ssid_range_t);
515 read_len += sizeof(struct diag_ssid_range_t);
516 mask_ptr = (struct diag_msg_mask_t *)msg_mask.ptr;
517 found = 0;
518 for (j = 0; j < driver->msg_mask_tbl_count; j++, mask_ptr++) {
519 if (mask_ptr->ssid_first != ssid_range->ssid_first)
520 continue;
521 mutex_lock(&mask_ptr->lock);
522 err = update_msg_mask_tbl_entry(mask_ptr, ssid_range);
523 mutex_unlock(&mask_ptr->lock);
524 if (err == -ENOMEM) {
525 pr_err("diag: In %s, unable to increase the msg mask table range\n",
526 __func__);
527 }
528 found = 1;
529 break;
530 }
531
532 if (found)
533 continue;
534
535 new_size = (driver->msg_mask_tbl_count + 1) *
536 sizeof(struct diag_msg_mask_t);
537 temp = krealloc(msg_mask.ptr, new_size, GFP_KERNEL);
538 if (!temp) {
539 pr_err("diag: In %s, Unable to add new ssid table to msg mask, ssid first: %d, last: %d\n",
540 __func__, ssid_range->ssid_first,
541 ssid_range->ssid_last);
542 continue;
543 }
544 msg_mask.ptr = temp;
545 err = diag_create_msg_mask_table_entry(mask_ptr, ssid_range);
546 if (err) {
547 pr_err("diag: In %s, Unable to create a new msg mask table entry, first: %d last: %d err: %d\n",
548 __func__, ssid_range->ssid_first,
549 ssid_range->ssid_last, err);
550 continue;
551 }
552 driver->msg_mask_tbl_count += 1;
553 }
554}
555
556static void diag_build_time_mask_update(uint8_t *buf,
557 struct diag_ssid_range_t *range)
558{
559 int i;
560 int j;
561 int num_items = 0;
562 int err = 0;
563 int found = 0;
564 int new_size = 0;
565 uint8_t *temp = NULL;
566 uint32_t *mask_ptr = (uint32_t *)buf;
567 uint32_t *dest_ptr = NULL;
568 struct diag_msg_mask_t *build_mask = NULL;
569
570 if (!range || !buf)
571 return;
572
573 if (range->ssid_last < range->ssid_first) {
574 pr_err("diag: In %s, invalid ssid range, first: %d, last: %d\n",
575 __func__, range->ssid_first, range->ssid_last);
576 return;
577 }
578
579 build_mask = (struct diag_msg_mask_t *)(driver->build_time_mask->ptr);
580 num_items = range->ssid_last - range->ssid_first + 1;
581
582 for (i = 0; i < driver->msg_mask_tbl_count; i++, build_mask++) {
583 if (build_mask->ssid_first != range->ssid_first)
584 continue;
585 found = 1;
586 mutex_lock(&build_mask->lock);
587 err = update_msg_mask_tbl_entry(build_mask, range);
588 if (err == -ENOMEM) {
589 pr_err("diag: In %s, unable to increase the msg build mask table range\n",
590 __func__);
591 }
592 dest_ptr = build_mask->ptr;
593 for (j = 0; j < build_mask->range; j++, mask_ptr++, dest_ptr++)
594 *(uint32_t *)dest_ptr |= *mask_ptr;
595 mutex_unlock(&build_mask->lock);
596 break;
597 }
598
599 if (found)
600 goto end;
601 new_size = (driver->msg_mask_tbl_count + 1) *
602 sizeof(struct diag_msg_mask_t);
603 temp = krealloc(driver->build_time_mask->ptr, new_size, GFP_KERNEL);
604 if (!temp) {
605 pr_err("diag: In %s, unable to create a new entry for build time mask\n",
606 __func__);
607 goto end;
608 }
609 driver->build_time_mask->ptr = temp;
610 err = diag_create_msg_mask_table_entry(build_mask, range);
611 if (err) {
612 pr_err("diag: In %s, Unable to create a new msg mask table entry, err: %d\n",
613 __func__, err);
614 goto end;
615 }
616 driver->msg_mask_tbl_count += 1;
617end:
618 return;
619}
620
/*
 * Handle a DIAG_CTRL_MSG_BUILD_MASK_REPORT packet: each record is an
 * SSID range followed by num_items uint32 mask words, which are merged
 * into the build-time mask table.
 */
static void process_build_mask_report(uint8_t *buf, uint32_t len,
				      uint8_t peripheral)
{
	int i;
	int read_len = 0;
	int num_items = 0;
	int header_len = sizeof(struct diag_ctrl_build_mask_report);
	uint8_t *ptr = buf;
	struct diag_ctrl_build_mask_report *header = NULL;
	struct diag_ssid_range_t *range = NULL;

	if (!buf || peripheral >= NUM_PERIPHERALS || len < header_len)
		return;

	header = (struct diag_ctrl_build_mask_report *)ptr;
	ptr += header_len;
	/* Don't account for pkt_id and length */
	read_len += header_len - (2 * sizeof(uint32_t));

	for (i = 0; i < header->count && read_len < len; i++) {
		range = (struct diag_ssid_range_t *)ptr;
		ptr += sizeof(struct diag_ssid_range_t);
		read_len += sizeof(struct diag_ssid_range_t);
		num_items = range->ssid_last - range->ssid_first + 1;
		/* ptr now points at the mask words for this range */
		diag_build_time_mask_update(ptr, range);
		ptr += num_items * sizeof(uint32_t);
		read_len += num_items * sizeof(uint32_t);
	}
}
650
/*
 * Entry point for data read from a peripheral's control channel. The
 * buffer may contain several control packets back-to-back; each is
 * dispatched to its handler by pkt_id. Packets from a peripheral whose
 * reg_dirty bit is set (SSR in progress) are dropped wholesale.
 */
void diag_cntl_process_read_data(struct diagfwd_info *p_info, void *buf,
				 int len)
{
	uint32_t read_len = 0;
	uint32_t header_len = sizeof(struct diag_ctrl_pkt_header_t);
	uint8_t *ptr = buf;
	struct diag_ctrl_pkt_header_t *ctrl_pkt = NULL;

	if (!buf || len <= 0 || !p_info)
		return;

	if (reg_dirty & PERIPHERAL_MASK(p_info->peripheral)) {
		pr_err_ratelimited("diag: dropping command registration from peripheral %d\n",
				   p_info->peripheral);
		return;
	}

	/* Walk the buffer one control packet at a time */
	while (read_len + header_len < len) {
		ctrl_pkt = (struct diag_ctrl_pkt_header_t *)ptr;
		switch (ctrl_pkt->pkt_id) {
		case DIAG_CTRL_MSG_REG:
			process_command_registration(ptr, ctrl_pkt->len,
						     p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_DEREG:
			process_command_deregistration(ptr, ctrl_pkt->len,
						       p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_FEATURE:
			process_incoming_feature_mask(ptr, ctrl_pkt->len,
						      p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_LAST_EVENT_REPORT:
			process_last_event_report(ptr, ctrl_pkt->len,
						  p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_LOG_RANGE_REPORT:
			process_log_range_report(ptr, ctrl_pkt->len,
						 p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_SSID_RANGE_REPORT:
			process_ssid_range_report(ptr, ctrl_pkt->len,
						  p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_BUILD_MASK_REPORT:
			process_build_mask_report(ptr, ctrl_pkt->len,
						  p_info->peripheral);
			break;
		case DIAG_CTRL_MSG_PD_STATUS:
			process_pd_status(ptr, ctrl_pkt->len,
					  p_info->peripheral);
			break;
		default:
			pr_debug("diag: Control packet %d not supported\n",
				 ctrl_pkt->pkt_id);
		}
		/* Advance past this packet's header and payload */
		ptr += header_len + ctrl_pkt->len;
		read_len += header_len + ctrl_pkt->len;
	}
}
711
712static int diag_compute_real_time(int idx)
713{
714 int real_time = MODE_REALTIME;
715
716 if (driver->proc_active_mask == 0) {
717 /*
718 * There are no DCI or Memory Device processes. Diag should
719 * be in Real Time mode irrespective of USB connection
720 */
721 real_time = MODE_REALTIME;
722 } else if (driver->proc_rt_vote_mask[idx] & driver->proc_active_mask) {
723 /*
724 * Atleast one process is alive and is voting for Real Time
725 * data - Diag should be in real time mode irrespective of USB
726 * connection.
727 */
728 real_time = MODE_REALTIME;
729 } else if (driver->usb_connected) {
730 /*
731 * If USB is connected, check individual process. If Memory
732 * Device Mode is active, set the mode requested by Memory
733 * Device process. Set to realtime mode otherwise.
734 */
735 if ((driver->proc_rt_vote_mask[idx] &
736 DIAG_PROC_MEMORY_DEVICE) == 0)
737 real_time = MODE_NONREALTIME;
738 else
739 real_time = MODE_REALTIME;
740 } else {
741 /*
742 * We come here if USB is not connected and the active
743 * processes are voting for Non realtime mode.
744 */
745 real_time = MODE_NONREALTIME;
746 }
747 return real_time;
748}
749
750static void diag_create_diag_mode_ctrl_pkt(unsigned char *dest_buf,
751 int real_time)
752{
753 struct diag_ctrl_msg_diagmode diagmode;
754 int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
755
756 if (!dest_buf)
757 return;
758
759 diagmode.ctrl_pkt_id = DIAG_CTRL_MSG_DIAGMODE;
760 diagmode.ctrl_pkt_data_len = DIAG_MODE_PKT_LEN;
761 diagmode.version = 1;
762 diagmode.sleep_vote = real_time ? 1 : 0;
763 /*
764 * 0 - Disables real-time logging (to prevent
765 * frequent APPS wake-ups, etc.).
766 * 1 - Enable real-time logging
767 */
768 diagmode.real_time = real_time;
769 diagmode.use_nrt_values = 0;
770 diagmode.commit_threshold = 0;
771 diagmode.sleep_threshold = 0;
772 diagmode.sleep_time = 0;
773 diagmode.drain_timer_val = 0;
774 diagmode.event_stale_timer_val = 0;
775
776 memcpy(dest_buf, &diagmode, msg_size);
777}
778
779void diag_update_proc_vote(uint16_t proc, uint8_t vote, int index)
780{
781 int i;
782
783 mutex_lock(&driver->real_time_mutex);
784 if (vote)
785 driver->proc_active_mask |= proc;
786 else {
787 driver->proc_active_mask &= ~proc;
788 if (index == ALL_PROC) {
789 for (i = 0; i < DIAG_NUM_PROC; i++)
790 driver->proc_rt_vote_mask[i] |= proc;
791 } else {
792 driver->proc_rt_vote_mask[index] |= proc;
793 }
794 }
795 mutex_unlock(&driver->real_time_mutex);
796}
797
798void diag_update_real_time_vote(uint16_t proc, uint8_t real_time, int index)
799{
800 int i;
801
802 if (index >= DIAG_NUM_PROC) {
803 pr_err("diag: In %s, invalid index %d\n", __func__, index);
804 return;
805 }
806
807 mutex_lock(&driver->real_time_mutex);
808 if (index == ALL_PROC) {
809 for (i = 0; i < DIAG_NUM_PROC; i++) {
810 if (real_time)
811 driver->proc_rt_vote_mask[i] |= proc;
812 else
813 driver->proc_rt_vote_mask[i] &= ~proc;
814 }
815 } else {
816 if (real_time)
817 driver->proc_rt_vote_mask[index] |= proc;
818 else
819 driver->proc_rt_vote_mask[index] &= ~proc;
820 }
821 mutex_unlock(&driver->real_time_mutex);
822}
823
824
825#ifdef CONFIG_DIAGFWD_BRIDGE_CODE
/*
 * Send a DIAGMODE control packet to a remote (bridged) device over the
 * DCI bridge channel. The packet is a DCI header, the diagmode payload,
 * and a trailing CONTROL_CHAR terminator.
 */
static void diag_send_diag_mode_update_remote(int token, int real_time)
{
	unsigned char *buf = NULL;
	int err = 0;
	struct diag_dci_header_t dci_header;
	int dci_header_size = sizeof(struct diag_dci_header_t);
	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
	uint32_t write_len = 0;

	if (token < 0 || token >= NUM_DCI_PROC) {
		pr_err("diag: Invalid remote device channel in %s, token: %d\n",
		       __func__, token);
		return;
	}

	if (real_time != MODE_REALTIME && real_time != MODE_NONREALTIME) {
		pr_err("diag: Invalid real time value in %s, type: %d\n",
		       __func__, real_time);
		return;
	}

	buf = dci_get_buffer_from_bridge(token);
	if (!buf) {
		pr_err("diag: In %s, unable to get dci buffers to write data\n",
		       __func__);
		return;
	}
	/* Frame the DCI header */
	dci_header.start = CONTROL_CHAR;
	dci_header.version = 1;
	/* Payload length covers the diagmode packet plus the terminator */
	dci_header.length = msg_size + 1;
	dci_header.cmd_code = DCI_CONTROL_PKT_CODE;

	memcpy(buf + write_len, &dci_header, dci_header_size);
	write_len += dci_header_size;
	diag_create_diag_mode_ctrl_pkt(buf + write_len, real_time);
	write_len += msg_size;
	*(buf + write_len) = CONTROL_CHAR; /* End Terminator */
	write_len += sizeof(uint8_t);
	err = diagfwd_bridge_write(TOKEN_TO_BRIDGE(token), buf, write_len);
	if (err != write_len) {
		/* Write failed: the buffer is still ours, return it */
		pr_err("diag: cannot send nrt mode ctrl pkt, err: %d\n", err);
		diagmem_free(driver, buf, dci_ops_tbl[token].mempool);
	} else {
		/* real_time_mode[0] is local; remotes start at index 1 */
		driver->real_time_mode[token + 1] = real_time;
	}
}
873#else
/* Bridge support compiled out: remote diag mode updates are a no-op */
static inline void diag_send_diag_mode_update_remote(int token, int real_time)
{
}
877#endif
878
879#ifdef CONFIG_DIAG_OVER_USB
/*
 * Worker that recomputes the desired real-time mode for every processor
 * and sends mode updates where the mode changed. Local peripherals are
 * skipped entirely while any of them is in a buffering mode.
 */
void diag_real_time_work_fn(struct work_struct *work)
{
	int temp_real_time = MODE_REALTIME, i, j;
	uint8_t send_update = 1;

	/*
	 * If any peripheral in the local processor is in either threshold or
	 * circular buffering mode, don't send the real time mode control
	 * packet.
	 */
	for (i = 0; i < NUM_PERIPHERALS; i++) {
		if (!driver->feature[i].peripheral_buffering)
			continue;
		switch (driver->buffering_mode[i].mode) {
		case DIAG_BUFFERING_MODE_THRESHOLD:
		case DIAG_BUFFERING_MODE_CIRCULAR:
			/* break leaves the switch; the scan continues */
			send_update = 0;
			break;
		}
	}

	mutex_lock(&driver->mode_lock);
	for (i = 0; i < DIAG_NUM_PROC; i++) {
		temp_real_time = diag_compute_real_time(i);
		if (temp_real_time == driver->real_time_mode[i]) {
			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
				 i, temp_real_time);
			continue;
		}

		if (i == DIAG_LOCAL_PROC) {
			if (!send_update) {
				pr_debug("diag: In %s, cannot send real time mode pkt since one of the periperhal is in buffering mode\n",
					 __func__);
				break;
			}
			for (j = 0; j < NUM_PERIPHERALS; j++)
				diag_send_real_time_update(j,
						temp_real_time);
		} else {
			/* Remote processors are tokens offset by one */
			diag_send_diag_mode_update_remote(i - 1,
							  temp_real_time);
		}
	}
	mutex_unlock(&driver->mode_lock);

	if (driver->real_time_update_busy > 0)
		driver->real_time_update_busy--;
}
929#else
/*
 * Non-USB build variant: recompute real-time mode per processor from
 * the client votes alone (no USB state to consult) and push updates
 * where the mode changed.
 */
void diag_real_time_work_fn(struct work_struct *work)
{
	int temp_real_time = MODE_REALTIME, i, j;

	for (i = 0; i < DIAG_NUM_PROC; i++) {
		if (driver->proc_active_mask == 0) {
			/*
			 * There are no DCI or Memory Device processes.
			 * Diag should be in Real Time mode.
			 */
			temp_real_time = MODE_REALTIME;
		} else if (!(driver->proc_rt_vote_mask[i] &
			     driver->proc_active_mask)) {
			/* No active process is voting for real time mode */
			temp_real_time = MODE_NONREALTIME;
		}
		if (temp_real_time == driver->real_time_mode[i]) {
			pr_debug("diag: did not update real time mode on proc %d, already in the req mode %d",
				 i, temp_real_time);
			continue;
		}

		if (i == DIAG_LOCAL_PROC) {
			for (j = 0; j < NUM_PERIPHERALS; j++)
				diag_send_real_time_update(
					j, temp_real_time);
		} else {
			/* Remote processors are tokens offset by one */
			diag_send_diag_mode_update_remote(i - 1,
							  temp_real_time);
		}
	}

	if (driver->real_time_update_busy > 0)
		driver->real_time_update_busy--;
}
965#endif
966
/*
 * Build and send a DIAGMODE control packet to one peripheral over its
 * control channel. On success, records the new mode in
 * driver->real_time_mode[DIAG_LOCAL_PROC]. Returns 0 or a negative
 * errno; a closed control channel is treated as success (0).
 */
static int __diag_send_real_time_update(uint8_t peripheral, int real_time)
{
	char buf[sizeof(struct diag_ctrl_msg_diagmode)];
	int msg_size = sizeof(struct diag_ctrl_msg_diagmode);
	int err = 0;

	if (peripheral >= NUM_PERIPHERALS)
		return -EINVAL;

	if (!driver->diagfwd_cntl[peripheral] ||
	    !driver->diagfwd_cntl[peripheral]->ch_open) {
		pr_debug("diag: In %s, control channel is not open, p: %d\n",
			 __func__, peripheral);
		return err;
	}

	if (real_time != MODE_NONREALTIME && real_time != MODE_REALTIME) {
		pr_err("diag: In %s, invalid real time mode %d, peripheral: %d\n",
		       __func__, real_time, peripheral);
		return -EINVAL;
	}

	diag_create_diag_mode_ctrl_pkt(buf, real_time);

	/* Serialize control-channel writes */
	mutex_lock(&driver->diag_cntl_mutex);
	err = diagfwd_write(peripheral, TYPE_CNTL, buf, msg_size);
	if (err && err != -ENODEV) {
		pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
		       __func__, peripheral, TYPE_CNTL,
		       msg_size, err);
	} else {
		/* -ENODEV (peripheral gone) still records the new mode */
		driver->real_time_mode[DIAG_LOCAL_PROC] = real_time;
	}

	mutex_unlock(&driver->diag_cntl_mutex);

	return err;
}
1005
1006int diag_send_real_time_update(uint8_t peripheral, int real_time)
1007{
1008 int i;
1009
1010 for (i = 0; i < NUM_PERIPHERALS; i++) {
1011 if (!driver->buffering_flag[i])
1012 continue;
1013 /*
1014 * One of the peripherals is in buffering mode. Don't set
1015 * the RT value.
1016 */
1017 return -EINVAL;
1018 }
1019
1020 return __diag_send_real_time_update(peripheral, real_time);
1021}
1022
/*
 * Configure the buffering (tx) mode of one peripheral: validate the
 * request, then send the tx-mode packet, the watermark values and the
 * matching real time mode, and finally cache the accepted settings in
 * driver->buffering_mode[].
 *
 * Returns 0 on success; -EIO for a NULL @params or a peripheral without
 * buffering support; -EINVAL for bad peripheral/mode/watermarks or when no
 * buffering request is pending; otherwise the first send error.
 *
 * NOTE(review): if one of the three sends fails the earlier packets have
 * already been delivered, so the peripheral may hold partially-applied
 * settings while the cached state stays untouched.
 */
int diag_send_peripheral_buffering_mode(struct diag_buffering_mode_t *params)
{
	int err = 0;
	int mode = MODE_REALTIME;
	uint8_t peripheral = 0;

	if (!params)
		return -EIO;

	peripheral = params->peripheral;
	if (peripheral >= NUM_PERIPHERALS) {
		pr_err("diag: In %s, invalid peripheral %d\n", __func__,
		       peripheral);
		return -EINVAL;
	}

	/* Only act when a buffering request is pending for this peripheral */
	if (!driver->buffering_flag[peripheral])
		return -EINVAL;

	/* Map the requested buffering mode onto the diag real time mode */
	switch (params->mode) {
	case DIAG_BUFFERING_MODE_STREAMING:
		mode = MODE_REALTIME;
		break;
	case DIAG_BUFFERING_MODE_THRESHOLD:
	case DIAG_BUFFERING_MODE_CIRCULAR:
		mode = MODE_NONREALTIME;
		break;
	default:
		pr_err("diag: In %s, invalid tx mode %d\n", __func__,
		       params->mode);
		return -EINVAL;
	}

	if (!driver->feature[peripheral].peripheral_buffering) {
		pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
			 __func__, peripheral);
		/* Drop the pending request: it can never be satisfied */
		driver->buffering_flag[peripheral] = 0;
		return -EIO;
	}

	/*
	 * Perform sanity on watermark values. These values must be
	 * checked irrespective of the buffering mode.
	 */
	if (((params->high_wm_val > DIAG_MAX_WM_VAL) ||
	     (params->low_wm_val > DIAG_MAX_WM_VAL)) ||
	    (params->low_wm_val > params->high_wm_val) ||
	    ((params->low_wm_val == params->high_wm_val) &&
	     (params->low_wm_val != DIAG_MIN_WM_VAL))) {
		pr_err("diag: In %s, invalid watermark values, high: %d, low: %d, peripheral: %d\n",
		       __func__, params->high_wm_val, params->low_wm_val,
		       peripheral);
		return -EINVAL;
	}

	mutex_lock(&driver->mode_lock);
	err = diag_send_buffering_tx_mode_pkt(peripheral, params);
	if (err) {
		pr_err("diag: In %s, unable to send buffering mode packet to peripheral %d, err: %d\n",
		       __func__, peripheral, err);
		goto fail;
	}
	err = diag_send_buffering_wm_values(peripheral, params);
	if (err) {
		pr_err("diag: In %s, unable to send buffering wm value packet to peripheral %d, err: %d\n",
		       __func__, peripheral, err);
		goto fail;
	}
	err = __diag_send_real_time_update(peripheral, mode);
	if (err) {
		pr_err("diag: In %s, unable to send mode update to peripheral %d, mode: %d, err: %d\n",
		       __func__, peripheral, mode, err);
		goto fail;
	}
	/* All packets accepted: record the active buffering settings */
	driver->buffering_mode[peripheral].peripheral = peripheral;
	driver->buffering_mode[peripheral].mode = params->mode;
	driver->buffering_mode[peripheral].low_wm_val = params->low_wm_val;
	driver->buffering_mode[peripheral].high_wm_val = params->high_wm_val;
	/* Streaming means buffering is effectively off for this peripheral */
	if (params->mode == DIAG_BUFFERING_MODE_STREAMING)
		driver->buffering_flag[peripheral] = 0;
fail:
	mutex_unlock(&driver->mode_lock);
	return err;
}
1107
1108int diag_send_stm_state(uint8_t peripheral, uint8_t stm_control_data)
1109{
1110 struct diag_ctrl_msg_stm stm_msg;
1111 int msg_size = sizeof(struct diag_ctrl_msg_stm);
1112 int err = 0;
1113
1114 if (peripheral >= NUM_PERIPHERALS)
1115 return -EIO;
1116
1117 if (!driver->diagfwd_cntl[peripheral] ||
1118 !driver->diagfwd_cntl[peripheral]->ch_open) {
1119 pr_debug("diag: In %s, control channel is not open, p: %d\n",
1120 __func__, peripheral);
1121 return -ENODEV;
1122 }
1123
1124 if (driver->feature[peripheral].stm_support == DISABLE_STM)
1125 return -EINVAL;
1126
1127 stm_msg.ctrl_pkt_id = 21;
1128 stm_msg.ctrl_pkt_data_len = 5;
1129 stm_msg.version = 1;
1130 stm_msg.control_data = stm_control_data;
1131 err = diagfwd_write(peripheral, TYPE_CNTL, &stm_msg, msg_size);
1132 if (err && err != -ENODEV) {
1133 pr_err("diag: In %s, unable to write to socket, peripheral: %d, type: %d, len: %d, err: %d\n",
1134 __func__, peripheral, TYPE_CNTL,
1135 msg_size, err);
1136 }
1137
1138 return err;
1139}
1140
1141int diag_send_peripheral_drain_immediate(uint8_t peripheral)
1142{
1143 int err = 0;
1144 struct diag_ctrl_drain_immediate ctrl_pkt;
1145
1146 if (!driver->feature[peripheral].peripheral_buffering) {
1147 pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
1148 __func__, peripheral);
1149 return -EINVAL;
1150 }
1151
1152 if (!driver->diagfwd_cntl[peripheral] ||
1153 !driver->diagfwd_cntl[peripheral]->ch_open) {
1154 pr_debug("diag: In %s, control channel is not open, p: %d\n",
1155 __func__, peripheral);
1156 return -ENODEV;
1157 }
1158
1159 ctrl_pkt.pkt_id = DIAG_CTRL_MSG_PERIPHERAL_BUF_DRAIN_IMM;
1160 /* The length of the ctrl pkt is size of version and stream id */
1161 ctrl_pkt.len = sizeof(uint32_t) + sizeof(uint8_t);
1162 ctrl_pkt.version = 1;
1163 ctrl_pkt.stream_id = 1;
1164
1165 err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
1166 if (err && err != -ENODEV) {
1167 pr_err("diag: Unable to send drain immediate ctrl packet to peripheral %d, err: %d\n",
1168 peripheral, err);
1169 }
1170
1171 return err;
1172}
1173
1174int diag_send_buffering_tx_mode_pkt(uint8_t peripheral,
1175 struct diag_buffering_mode_t *params)
1176{
1177 int err = 0;
1178 struct diag_ctrl_peripheral_tx_mode ctrl_pkt;
1179
1180 if (!params)
1181 return -EIO;
1182
1183 if (peripheral >= NUM_PERIPHERALS)
1184 return -EINVAL;
1185
1186 if (!driver->feature[peripheral].peripheral_buffering) {
1187 pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
1188 __func__, peripheral);
1189 return -EINVAL;
1190 }
1191
1192 if (params->peripheral != peripheral)
1193 return -EINVAL;
1194
1195 switch (params->mode) {
1196 case DIAG_BUFFERING_MODE_STREAMING:
1197 case DIAG_BUFFERING_MODE_THRESHOLD:
1198 case DIAG_BUFFERING_MODE_CIRCULAR:
1199 break;
1200 default:
1201 pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
1202 params->mode);
1203 return -EINVAL;
1204 }
1205
1206 ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_TX_MODE;
1207 /* Control packet length is size of version, stream_id and tx_mode */
1208 ctrl_pkt.len = sizeof(uint32_t) + (2 * sizeof(uint8_t));
1209 ctrl_pkt.version = 1;
1210 ctrl_pkt.stream_id = 1;
1211 ctrl_pkt.tx_mode = params->mode;
1212
1213 err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt, sizeof(ctrl_pkt));
1214 if (err && err != -ENODEV) {
1215 pr_err("diag: Unable to send tx_mode ctrl packet to peripheral %d, err: %d\n",
1216 peripheral, err);
1217 goto fail;
1218 }
1219 driver->buffering_mode[peripheral].mode = params->mode;
1220
1221fail:
1222 return err;
1223}
1224
1225int diag_send_buffering_wm_values(uint8_t peripheral,
1226 struct diag_buffering_mode_t *params)
1227{
1228 int err = 0;
1229 struct diag_ctrl_set_wq_val ctrl_pkt;
1230
1231 if (!params)
1232 return -EIO;
1233
1234 if (peripheral >= NUM_PERIPHERALS)
1235 return -EINVAL;
1236
1237 if (!driver->feature[peripheral].peripheral_buffering) {
1238 pr_debug("diag: In %s, peripheral %d doesn't support buffering\n",
1239 __func__, peripheral);
1240 return -EINVAL;
1241 }
1242
1243 if (!driver->diagfwd_cntl[peripheral] ||
1244 !driver->diagfwd_cntl[peripheral]->ch_open) {
1245 pr_debug("diag: In %s, control channel is not open, p: %d\n",
1246 __func__, peripheral);
1247 return -ENODEV;
1248 }
1249
1250 if (params->peripheral != peripheral)
1251 return -EINVAL;
1252
1253 switch (params->mode) {
1254 case DIAG_BUFFERING_MODE_STREAMING:
1255 case DIAG_BUFFERING_MODE_THRESHOLD:
1256 case DIAG_BUFFERING_MODE_CIRCULAR:
1257 break;
1258 default:
1259 pr_err("diag: In %s, invalid tx mode: %d\n", __func__,
1260 params->mode);
1261 return -EINVAL;
1262 }
1263
1264 ctrl_pkt.pkt_id = DIAG_CTRL_MSG_CONFIG_PERIPHERAL_WMQ_VAL;
1265 /* Control packet length is size of version, stream_id and wmq values */
1266 ctrl_pkt.len = sizeof(uint32_t) + (3 * sizeof(uint8_t));
1267 ctrl_pkt.version = 1;
1268 ctrl_pkt.stream_id = 1;
1269 ctrl_pkt.high_wm_val = params->high_wm_val;
1270 ctrl_pkt.low_wm_val = params->low_wm_val;
1271
1272 err = diagfwd_write(peripheral, TYPE_CNTL, &ctrl_pkt,
1273 sizeof(ctrl_pkt));
1274 if (err && err != -ENODEV) {
1275 pr_err("diag: Unable to send watermark values to peripheral %d, err: %d\n",
1276 peripheral, err);
1277 }
1278
1279 return err;
1280}
1281
1282int diagfwd_cntl_init(void)
1283{
1284 uint8_t peripheral = 0;
1285
1286 reg_dirty = 0;
1287 driver->polling_reg_flag = 0;
1288 driver->log_on_demand_support = 1;
1289 driver->stm_peripheral = 0;
1290 driver->close_transport = 0;
1291 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++)
1292 driver->buffering_flag[peripheral] = 0;
1293
1294 mutex_init(&driver->cntl_lock);
1295 INIT_WORK(&(driver->stm_update_work), diag_stm_update_work_fn);
1296 INIT_WORK(&(driver->mask_update_work), diag_mask_update_work_fn);
1297 INIT_WORK(&(driver->close_transport_work),
1298 diag_close_transport_work_fn);
1299
1300 driver->cntl_wq = create_singlethread_workqueue("diag_cntl_wq");
1301 if (!driver->cntl_wq)
1302 return -ENOMEM;
1303
1304 return 0;
1305}
1306
1307void diagfwd_cntl_channel_init(void)
1308{
1309 uint8_t peripheral;
1310
1311 for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
1312 diagfwd_early_open(peripheral);
1313 diagfwd_open(peripheral, TYPE_CNTL);
1314 }
1315}
1316
1317void diagfwd_cntl_exit(void)
1318{
1319 if (driver->cntl_wq)
1320 destroy_workqueue(driver->cntl_wq);
1321}