/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

/**
 * File: cds_sched.c
 *
 * DOC: CDS Scheduler Implementation
 */

/* Include Files */
#include <cds_mq.h>
#include <cds_api.h>
#include <ani_global.h>
#include <sir_types.h>
#include <cdf_types.h>
#include <lim_api.h>
#include <sme_api.h>
#include <wlan_qct_sys.h>
#include "cds_sched.h"
#include <wlan_hdd_power.h>
#include "wma_types.h"
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/cpu.h>
#if defined(QCA_CONFIG_SMP) && defined(CONFIG_CNSS)
#include <net/cnss.h>
#endif
/* Preprocessor Definitions and Constants */
#define CDS_SCHED_THREAD_HEART_BEAT INFINITE
/* Milliseconds to delay the SSR thread when an entry point is active */
#define SSR_WAIT_SLEEP_TIME 200
/* Maximum number of iterations to wait for entry points to exit before
 * we proceed with SSR in the WD thread
 */
#define MAX_SSR_WAIT_ITERATIONS 200
#define MAX_SSR_PROTECT_LOG (16)

static atomic_t ssr_protect_entry_count;

/**
 * struct ssr_protect - subsystem restart (SSR) protection tracking table
 * @func: function which needs SSR protection
 * @free: flag to tell whether the entry is free in the table or not
 * @pid: process id which needs SSR protection
 */
struct ssr_protect {
	const char *func;
	bool free;
	uint32_t pid;
};

static spinlock_t ssr_protect_lock;
static struct ssr_protect ssr_protect_log[MAX_SSR_PROTECT_LOG];

static p_cds_sched_context gp_cds_sched_context;

static int cds_mc_thread(void *Arg);
#ifdef QCA_CONFIG_SMP
static int cds_ol_rx_thread(void *arg);
static unsigned long affine_cpu;
static CDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext);
#endif

#ifdef QCA_CONFIG_SMP
#define CDS_CORE_PER_CLUSTER (4)
static int cds_set_cpus_allowed_ptr(struct task_struct *task, unsigned long cpu)
{
#ifdef WLAN_OPEN_SOURCE
	return set_cpus_allowed_ptr(task, cpumask_of(cpu));
#elif defined(CONFIG_CNSS)
	return cnss_set_cpus_allowed_ptr(task, cpu);
#else
	return 0;
#endif
}

/**
 * cds_cpu_hotplug_notify() - CPU hotplug notification handler
 * @block: notifier block
 * @state: hotplug state
 * @hcpu: hotplug cpu pointer
 *
 * Re-affines the OL RX thread when the CPU it is bound to goes online
 * or offline.
 *
 * Return: NOTIFY_OK
 */
static int
cds_cpu_hotplug_notify(struct notifier_block *block,
		       unsigned long state, void *hcpu)
{
	unsigned long cpu = (unsigned long)hcpu;
	unsigned long pref_cpu = 0;
	p_cds_sched_context pSchedContext = get_cds_sched_ctxt();
	int i;
	unsigned int multi_cluster;
	unsigned int num_cpus;

	if ((NULL == pSchedContext) || (NULL == pSchedContext->ol_rx_thread))
		return NOTIFY_OK;

	if (cds_is_load_unload_in_progress())
		return NOTIFY_OK;

	num_cpus = num_possible_cpus();
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_LOW,
		  "%s: RX CORE %d, STATE %d, NUM CPUS %d",
		  __func__, (int)affine_cpu, (int)state, num_cpus);
	multi_cluster = (num_cpus > CDS_CORE_PER_CLUSTER) ? 1 : 0;

	switch (state) {
	case CPU_ONLINE:
		if ((!multi_cluster) && (affine_cpu != 0))
			return NOTIFY_OK;

		for_each_online_cpu(i) {
			if (i == 0)
				continue;
			pref_cpu = i;
			if (!multi_cluster)
				break;
		}
		break;
	case CPU_DEAD:
		if (cpu != affine_cpu)
			return NOTIFY_OK;

		affine_cpu = 0;
		for_each_online_cpu(i) {
			if (i == 0)
				continue;
			pref_cpu = i;
			if (!multi_cluster)
				break;
		}
	}

	if (pref_cpu == 0)
		return NOTIFY_OK;

	if (!cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
		affine_cpu = pref_cpu;

	return NOTIFY_OK;
}

static struct notifier_block cds_cpu_hotplug_notifier = {
	.notifier_call = cds_cpu_hotplug_notify,
};
#endif

/**
 * cds_sched_open() - initialize the CDS Scheduler
 * @p_cds_context: Pointer to the global CDS Context
 * @pSchedContext: Pointer to a previously allocated buffer big
 *	enough to hold a scheduler context
 * @SchedCtxSize: CDS scheduler context size
 *
 * This function initializes the CDS Scheduler.
 * Upon successful initialization:
 * - All the message queues are initialized
 * - The Main Controller thread is created and ready to receive and
 *   dispatch messages.
 *
 * Return: CDF status
 */
CDF_STATUS cds_sched_open(void *p_cds_context,
			  p_cds_sched_context pSchedContext,
			  uint32_t SchedCtxSize)
{
	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Opening the CDS Scheduler", __func__);
	/* Sanity checks */
	if ((p_cds_context == NULL) || (pSchedContext == NULL)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Null params being passed", __func__);
		return CDF_STATUS_E_FAILURE;
	}
	if (sizeof(cds_sched_context) != SchedCtxSize) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
			  "%s: Incorrect CDS Sched Context size passed",
			  __func__);
		return CDF_STATUS_E_INVAL;
	}
	cdf_mem_zero(pSchedContext, sizeof(cds_sched_context));
	pSchedContext->pVContext = p_cds_context;
	vStatus = cds_sched_init_mqs(pSchedContext);
	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to initialize CDS Scheduler MQs",
			  __func__);
		return vStatus;
	}
	/* Initialize the helper events and event queues */
	init_completion(&pSchedContext->McStartEvent);
	init_completion(&pSchedContext->McShutdown);
	init_completion(&pSchedContext->ResumeMcEvent);

	spin_lock_init(&pSchedContext->McThreadLock);
#ifdef QCA_CONFIG_SMP
	spin_lock_init(&pSchedContext->ol_rx_thread_lock);
#endif

	init_waitqueue_head(&pSchedContext->mcWaitQueue);
	pSchedContext->mcEventFlag = 0;

#ifdef QCA_CONFIG_SMP
	init_waitqueue_head(&pSchedContext->ol_rx_wait_queue);
	init_completion(&pSchedContext->ol_rx_start_event);
	init_completion(&pSchedContext->ol_suspend_rx_event);
	init_completion(&pSchedContext->ol_resume_rx_event);
	init_completion(&pSchedContext->ol_rx_shutdown);
	pSchedContext->ol_rx_event_flag = 0;
	spin_lock_init(&pSchedContext->ol_rx_queue_lock);
	spin_lock_init(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	INIT_LIST_HEAD(&pSchedContext->ol_rx_thread_queue);
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	INIT_LIST_HEAD(&pSchedContext->cds_ol_rx_pkt_freeq);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	if (cds_alloc_ol_rx_pkt_freeq(pSchedContext) != CDF_STATUS_SUCCESS)
		return CDF_STATUS_E_FAILURE;
	register_hotcpu_notifier(&cds_cpu_hotplug_notifier);
	pSchedContext->cpu_hot_plug_notifier = &cds_cpu_hotplug_notifier;
#endif
	gp_cds_sched_context = pSchedContext;

	/* Create the CDS Main Controller thread */
	pSchedContext->McThread = kthread_create(cds_mc_thread, pSchedContext,
						 "cds_mc_thread");
	if (IS_ERR(pSchedContext->McThread)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "%s: Could not Create CDS Main Thread Controller",
			  __func__);
		goto MC_THREAD_START_FAILURE;
	}
	wake_up_process(pSchedContext->McThread);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: CDS Main Controller thread Created", __func__);

#ifdef QCA_CONFIG_SMP
	pSchedContext->ol_rx_thread = kthread_create(cds_ol_rx_thread,
						     pSchedContext,
						     "cds_ol_rx_thread");
	if (IS_ERR(pSchedContext->ol_rx_thread)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_FATAL,
			  "%s: Could not Create CDS OL RX Thread",
			  __func__);
		goto OL_RX_THREAD_START_FAILURE;
	}
	wake_up_process(pSchedContext->ol_rx_thread);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "CDS OL RX thread Created");
#endif
	/*
	 * Now make sure all threads have started before we exit.
	 * Each thread should normally ACK back when it starts.
	 */
	wait_for_completion_interruptible(&pSchedContext->McStartEvent);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: CDS MC Thread has started", __func__);
#ifdef QCA_CONFIG_SMP
	wait_for_completion_interruptible(&pSchedContext->ol_rx_start_event);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: CDS OL Rx Thread has started", __func__);
#endif
	/* We're good now: Let's get the ball rolling!!! */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: CDS Scheduler successfully Opened", __func__);
	return CDF_STATUS_SUCCESS;

#ifdef QCA_CONFIG_SMP
OL_RX_THREAD_START_FAILURE:
	/* Try and force the Main thread controller to exit */
	set_bit(MC_SHUTDOWN_EVENT_MASK, &pSchedContext->mcEventFlag);
	set_bit(MC_POST_EVENT_MASK, &pSchedContext->mcEventFlag);
	wake_up_interruptible(&pSchedContext->mcWaitQueue);
	/* Wait for MC to exit */
	wait_for_completion_interruptible(&pSchedContext->McShutdown);
#endif

MC_THREAD_START_FAILURE:
	/* De-initialize all the message queues */
	cds_sched_deinit_mqs(pSchedContext);

#ifdef QCA_CONFIG_SMP
	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
#endif

	return CDF_STATUS_E_RESOURCES;
} /* cds_sched_open() */
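
/*
 * Usage sketch for cds_sched_open() (illustrative only, not driver
 * code; the storage and error handling below are assumptions):
 *
 *	static cds_sched_context sched_ctx;
 *	CDF_STATUS status;
 *
 *	status = cds_sched_open(p_cds_context, &sched_ctx,
 *				sizeof(cds_sched_context));
 *	if (!CDF_IS_STATUS_SUCCESS(status))
 *		goto err_sched;	(abort driver start-up)
 *
 * The caller owns the context buffer; cds_sched_open() zeroes it and
 * blocks until the MC (and, with QCA_CONFIG_SMP, OL RX) thread has
 * acknowledged start-up.
 */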

/**
 * cds_mc_thread() - cds main controller thread execution handler
 * @Arg: Pointer to the global CDS Sched Context
 *
 * Return: thread exit code
 */
static int cds_mc_thread(void *Arg)
{
	p_cds_sched_context pSchedContext = (p_cds_sched_context) Arg;
	p_cds_msg_wrapper pMsgWrapper = NULL;
	tpAniSirGlobal pMacContext = NULL;
	tSirRetStatus macStatus = eSIR_SUCCESS;
	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;
	int retWaitStatus = 0;
	bool shutdown = false;
	hdd_context_t *pHddCtx = NULL;
	v_CONTEXT_t p_cds_context = NULL;

	if (Arg == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Bad Args passed", __func__);
		return 0;
	}
	set_user_nice(current, -2);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	daemonize("MC_Thread");
#endif

	/* Ack back to the context from which the main controller thread
	 * has been created
	 */
	complete(&pSchedContext->McStartEvent);
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: MC Thread %d (%s) starting up", __func__, current->pid,
		  current->comm);

	/* Get the Global CDS Context */
	p_cds_context = cds_get_global_context();
	if (!p_cds_context) {
		hddLog(CDF_TRACE_LEVEL_FATAL, "%s: Global CDS context is Null",
		       __func__);
		return 0;
	}

	pHddCtx = cds_get_context(CDF_MODULE_ID_HDD);
	if (!pHddCtx) {
		hddLog(CDF_TRACE_LEVEL_FATAL, "%s: HDD context is Null",
		       __func__);
		return 0;
	}

	while (!shutdown) {
		/* This implements the execution model algorithm */
		retWaitStatus =
			wait_event_interruptible(pSchedContext->mcWaitQueue,
						 test_bit(MC_POST_EVENT_MASK,
							  &pSchedContext->mcEventFlag)
						 || test_bit(MC_SUSPEND_EVENT_MASK,
							     &pSchedContext->mcEventFlag));

		if (retWaitStatus == -ERESTARTSYS) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "%s: wait_event_interruptible returned -ERESTARTSYS",
				  __func__);
			CDF_BUG(0);
		}
		clear_bit(MC_POST_EVENT_MASK, &pSchedContext->mcEventFlag);

		while (1) {
			/* Check if MC needs to shutdown */
			if (test_bit(MC_SHUTDOWN_EVENT_MASK,
				     &pSchedContext->mcEventFlag)) {
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: MC thread signaled to shutdown",
					  __func__);
				shutdown = true;
				/* Check for any Suspend Indication */
				if (test_bit(MC_SUSPEND_EVENT_MASK,
					     &pSchedContext->mcEventFlag)) {
					clear_bit(MC_SUSPEND_EVENT_MASK,
						  &pSchedContext->mcEventFlag);

					/* Unblock anyone waiting on suspend */
					complete(&pHddCtx->mc_sus_event_var);
				}
				break;
			}
			/* Check the SYS queue first */
			if (!cds_is_mq_empty(&pSchedContext->sysMcMq)) {
				/* Service the SYS message queue */
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: Servicing the CDS SYS MC Message queue",
					  __func__);
				pMsgWrapper = cds_mq_get(&pSchedContext->sysMcMq);
				if (pMsgWrapper == NULL) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: pMsgWrapper is NULL",
						  __func__);
					CDF_ASSERT(0);
					break;
				}
				vStatus = sys_mc_process_msg(pSchedContext->pVContext,
							     pMsgWrapper->pVosMsg);
				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: Issue Processing SYS message",
						  __func__);
				}
				/* return message to the Core */
				cds_core_return_msg(pSchedContext->pVContext,
						    pMsgWrapper);
				continue;
			}
			/* Check the WMA queue */
			if (!cds_is_mq_empty(&pSchedContext->wmaMcMq)) {
				/* Service the WMA message queue */
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: Servicing the CDS WMA MC Message queue",
					  __func__);
				pMsgWrapper = cds_mq_get(&pSchedContext->wmaMcMq);
				if (pMsgWrapper == NULL) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: pMsgWrapper is NULL",
						  __func__);
					CDF_ASSERT(0);
					break;
				}
				vStatus = wma_mc_process_msg(pSchedContext->pVContext,
							     pMsgWrapper->pVosMsg);
				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: Issue Processing WMA message",
						  __func__);
				}
				/* return message to the Core */
				cds_core_return_msg(pSchedContext->pVContext,
						    pMsgWrapper);
				continue;
			}
			/* Check the PE queue */
			if (!cds_is_mq_empty(&pSchedContext->peMcMq)) {
				/* Service the PE message queue */
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: Servicing the CDS PE MC Message queue",
					  __func__);
				pMsgWrapper = cds_mq_get(&pSchedContext->peMcMq);
				if (NULL == pMsgWrapper) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: pMsgWrapper is NULL",
						  __func__);
					CDF_ASSERT(0);
					break;
				}

				/* Need some optimization */
				pMacContext = cds_get_context(CDF_MODULE_ID_PE);
				if (NULL == pMacContext) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_INFO,
						  "MAC Context not ready yet");
					cds_core_return_msg(pSchedContext->pVContext,
							    pMsgWrapper);
					continue;
				}

				macStatus = pe_process_messages(pMacContext,
								(tSirMsgQ *)
								pMsgWrapper->pVosMsg);
				if (eSIR_SUCCESS != macStatus) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: Issue Processing PE message",
						  __func__);
				}
				/* return message to the Core */
				cds_core_return_msg(pSchedContext->pVContext,
						    pMsgWrapper);
				continue;
			}
			/* Check the SME queue */
			if (!cds_is_mq_empty(&pSchedContext->smeMcMq)) {
				/* Service the SME message queue */
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: Servicing the CDS SME MC Message queue",
					  __func__);
				pMsgWrapper = cds_mq_get(&pSchedContext->smeMcMq);
				if (NULL == pMsgWrapper) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: pMsgWrapper is NULL",
						  __func__);
					CDF_ASSERT(0);
					break;
				}

				/* Need some optimization */
				pMacContext = cds_get_context(CDF_MODULE_ID_SME);
				if (NULL == pMacContext) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_INFO,
						  "MAC Context not ready yet");
					cds_core_return_msg(pSchedContext->pVContext,
							    pMsgWrapper);
					continue;
				}

				vStatus = sme_process_msg((tHalHandle) pMacContext,
							  pMsgWrapper->pVosMsg);
				if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
					CDF_TRACE(CDF_MODULE_ID_CDF,
						  CDF_TRACE_LEVEL_ERROR,
						  "%s: Issue Processing SME message",
						  __func__);
				}
				/* return message to the Core */
				cds_core_return_msg(pSchedContext->pVContext,
						    pMsgWrapper);
				continue;
			}
			/* Check for any Suspend Indication */
			if (test_bit(MC_SUSPEND_EVENT_MASK,
				     &pSchedContext->mcEventFlag)) {
				clear_bit(MC_SUSPEND_EVENT_MASK,
					  &pSchedContext->mcEventFlag);
				spin_lock(&pSchedContext->McThreadLock);

				/* MC thread is suspended */
				complete(&pHddCtx->mc_sus_event_var);

				INIT_COMPLETION(pSchedContext->ResumeMcEvent);
				spin_unlock(&pSchedContext->McThreadLock);

				/* Wait for Resume Indication */
				wait_for_completion_interruptible
					(&pSchedContext->ResumeMcEvent);
			}
			break;	/* All queues are empty now */
		} /* while message loop processing */
	} /* while true */
	/* If we get here the MC thread must exit */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: MC Thread exiting!!!!", __func__);
	complete_and_exit(&pSchedContext->McShutdown, 0);
} /* cds_mc_thread() */

#ifdef QCA_CONFIG_SMP
/**
 * cds_free_ol_rx_pkt_freeq() - free the cds rx buffer free queue
 * @pSchedContext: pointer to the global CDS Sched Context
 *
 * This API frees all the buffers sitting in the free cds buffer
 * queue, which is used for RX data processing.
 *
 * Return: none
 */
void cds_free_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt, *tmp;

	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
				 list) {
		list_del(&pkt->list);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		cdf_mem_free(pkt);
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
}

/**
 * cds_alloc_ol_rx_pkt_freeq() - allocate the cds rx buffer free queue
 * @pSchedContext: pointer to the global CDS Sched Context
 *
 * This API allocates CDS_MAX_OL_RX_PKT cds message buffers, which are
 * used for RX data processing.
 *
 * Return: status of the memory allocation
 */
static CDF_STATUS cds_alloc_ol_rx_pkt_freeq(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt, *tmp;
	int i;

	for (i = 0; i < CDS_MAX_OL_RX_PKT; i++) {
		pkt = cdf_mem_malloc(sizeof(*pkt));
		if (!pkt) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "%s: Packet allocation for ol rx thread failed",
				  __func__);
			goto free;
		}
		memset(pkt, 0, sizeof(*pkt));
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}

	return CDF_STATUS_SUCCESS;

free:
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	list_for_each_entry_safe(pkt, tmp, &pSchedContext->cds_ol_rx_pkt_freeq,
				 list) {
		list_del(&pkt->list);
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		cdf_mem_free(pkt);
		spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	}
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	return CDF_STATUS_E_NOMEM;
}

/**
 * cds_free_ol_rx_pkt() - return a cds message to the free queue
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @pkt: CDS message buffer to be returned to the free queue
 *
 * This API returns a cds message buffer used for RX data to the free
 * queue.
 *
 * Return: none
 */
void
cds_free_ol_rx_pkt(p_cds_sched_context pSchedContext,
		   struct cds_ol_rx_pkt *pkt)
{
	memset(pkt, 0, sizeof(*pkt));
	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	list_add_tail(&pkt->list, &pSchedContext->cds_ol_rx_pkt_freeq);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
}

/**
 * cds_alloc_ol_rx_pkt() - return the next available cds message
 * @pSchedContext: Pointer to the global CDS Sched Context
 *
 * This API returns the next available cds message buffer used for RX
 * data processing.
 *
 * Return: Pointer to a cds message buffer, or NULL if none is available
 */
struct cds_ol_rx_pkt *cds_alloc_ol_rx_pkt(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt;

	spin_lock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	if (list_empty(&pSchedContext->cds_ol_rx_pkt_freeq)) {
		spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
		return NULL;
	}
	pkt = list_first_entry(&pSchedContext->cds_ol_rx_pkt_freeq,
			       struct cds_ol_rx_pkt, list);
	list_del(&pkt->list);
	spin_unlock_bh(&pSchedContext->cds_ol_rx_pkt_freeq_lock);
	return pkt;
}

/**
 * cds_indicate_rxpkt() - indicate an rx data packet
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @pkt: CDS data message buffer
 *
 * This API enqueues the rx packet into ol_rx_thread_queue and notifies
 * cds_ol_rx_thread().
 *
 * Return: none
 */
void
cds_indicate_rxpkt(p_cds_sched_context pSchedContext,
		   struct cds_ol_rx_pkt *pkt)
{
	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	list_add_tail(&pkt->list, &pSchedContext->ol_rx_thread_queue);
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
	set_bit(RX_POST_EVENT_MASK, &pSchedContext->ol_rx_event_flag);
	wake_up_interruptible(&pSchedContext->ol_rx_wait_queue);
}
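
/*
 * Usage sketch for handing a packet to the OL RX thread (illustrative
 * only; hdd_example_rx_cb, adapter, rx_buf_list and sta_id are assumed
 * names, not driver code):
 *
 *	struct cds_ol_rx_pkt *pkt;
 *	p_cds_sched_context sched = get_cds_sched_ctxt();
 *
 *	pkt = cds_alloc_ol_rx_pkt(sched);
 *	if (!pkt)
 *		return;		(free queue exhausted: drop or process inline)
 *	pkt->callback = hdd_example_rx_cb;
 *	pkt->context = adapter;
 *	pkt->Rxpkt = rx_buf_list;
 *	pkt->staId = sta_id;
 *	cds_indicate_rxpkt(sched, pkt);
 *
 * cds_ol_rx_thread() later invokes pkt->callback(context, Rxpkt, staId)
 * via cds_rx_from_queue() and recycles pkt with cds_free_ol_rx_pkt().
 */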

/**
 * cds_drop_rxpkt_by_staid() - drop pending rx packets for a sta
 * @pSchedContext: Pointer to the global CDS Sched Context
 * @staId: Station Id
 *
 * This API drops the queued packets for a station; to drop all of the
 * pending packets, the caller passes WLAN_MAX_STA_COUNT as staId.
 *
 * Return: none
 */
void cds_drop_rxpkt_by_staid(p_cds_sched_context pSchedContext, uint16_t staId)
{
	struct list_head local_list;
	struct cds_ol_rx_pkt *pkt, *tmp;
	cdf_nbuf_t buf, next_buf;

	INIT_LIST_HEAD(&local_list);
	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	if (list_empty(&pSchedContext->ol_rx_thread_queue)) {
		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
		return;
	}
	list_for_each_entry_safe(pkt, tmp, &pSchedContext->ol_rx_thread_queue,
				 list) {
		if (pkt->staId == staId || staId == WLAN_MAX_STA_COUNT)
			list_move_tail(&pkt->list, &local_list);
	}
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);

	/* use the _safe iterator here: cds_free_ol_rx_pkt() re-links
	 * pkt->list into the free queue, so the plain iterator would
	 * walk off this local list
	 */
	list_for_each_entry_safe(pkt, tmp, &local_list, list) {
		list_del(&pkt->list);
		buf = pkt->Rxpkt;
		while (buf) {
			next_buf = cdf_nbuf_queue_next(buf);
			cdf_nbuf_free(buf);
			buf = next_buf;
		}
		cds_free_ol_rx_pkt(pSchedContext, pkt);
	}
}
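
/*
 * Usage sketch (illustrative only): on station disconnect a caller
 * would flush that station's pending packets, or flush everything on
 * driver unload:
 *
 *	cds_drop_rxpkt_by_staid(get_cds_sched_ctxt(), sta_id);
 *	cds_drop_rxpkt_by_staid(get_cds_sched_ctxt(), WLAN_MAX_STA_COUNT);
 */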

/**
 * cds_rx_from_queue() - process pending rx packets
 * @pSchedContext: Pointer to the global CDS Sched Context
 *
 * This API traverses the pending buffer list and calls the callback
 * for each entry. The callback essentially sends the packet to HDD.
 *
 * Return: none
 */
static void cds_rx_from_queue(p_cds_sched_context pSchedContext)
{
	struct cds_ol_rx_pkt *pkt;
	uint16_t sta_id;

	spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	while (!list_empty(&pSchedContext->ol_rx_thread_queue)) {
		pkt = list_first_entry(&pSchedContext->ol_rx_thread_queue,
				       struct cds_ol_rx_pkt, list);
		list_del(&pkt->list);
		spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
		sta_id = pkt->staId;
		pkt->callback(pkt->context, pkt->Rxpkt, sta_id);
		cds_free_ol_rx_pkt(pSchedContext, pkt);
		spin_lock_bh(&pSchedContext->ol_rx_queue_lock);
	}
	spin_unlock_bh(&pSchedContext->ol_rx_queue_lock);
}


/**
 * cds_ol_rx_thread() - cds main tlshim rx thread
 * @arg: pointer to the global CDS Sched Context
 *
 * This API is the thread handler for tlshim data packet processing.
 *
 * Return: thread exit code
 */
static int cds_ol_rx_thread(void *arg)
{
	p_cds_sched_context pSchedContext = (p_cds_sched_context) arg;
	unsigned long pref_cpu = 0;
	bool shutdown = false;
	int status, i;
	unsigned int num_cpus;

	/* validate arg before it is used */
	if (!arg) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Bad Args passed", __func__);
		return 0;
	}

	set_user_nice(current, -1);
#ifdef MSM_PLATFORM
	set_wake_up_idle(true);
#endif

	num_cpus = num_possible_cpus();
	/* Find an available cpu core other than cpu 0 and
	 * bind the thread
	 */
	for_each_online_cpu(i) {
		if (i == 0)
			continue;
		pref_cpu = i;
		if (num_cpus <= CDS_CORE_PER_CLUSTER)
			break;
	}
	if (pref_cpu != 0 && (!cds_set_cpus_allowed_ptr(current, pref_cpu)))
		affine_cpu = pref_cpu;

	complete(&pSchedContext->ol_rx_start_event);

	while (!shutdown) {
		status =
			wait_event_interruptible(pSchedContext->ol_rx_wait_queue,
						 test_bit(RX_POST_EVENT_MASK,
							  &pSchedContext->ol_rx_event_flag)
						 || test_bit(RX_SUSPEND_EVENT_MASK,
							     &pSchedContext->ol_rx_event_flag));
		if (status == -ERESTARTSYS)
			break;

		clear_bit(RX_POST_EVENT_MASK, &pSchedContext->ol_rx_event_flag);
		while (true) {
			if (test_bit(RX_SHUTDOWN_EVENT_MASK,
				     &pSchedContext->ol_rx_event_flag)) {
				clear_bit(RX_SHUTDOWN_EVENT_MASK,
					  &pSchedContext->ol_rx_event_flag);
				if (test_bit(RX_SUSPEND_EVENT_MASK,
					     &pSchedContext->ol_rx_event_flag)) {
					clear_bit(RX_SUSPEND_EVENT_MASK,
						  &pSchedContext->ol_rx_event_flag);
					complete
						(&pSchedContext->ol_suspend_rx_event);
				}
				CDF_TRACE(CDF_MODULE_ID_CDF,
					  CDF_TRACE_LEVEL_INFO,
					  "%s: Shutting down OL RX Thread",
					  __func__);
				shutdown = true;
				break;
			}
			cds_rx_from_queue(pSchedContext);

			if (test_bit(RX_SUSPEND_EVENT_MASK,
				     &pSchedContext->ol_rx_event_flag)) {
				clear_bit(RX_SUSPEND_EVENT_MASK,
					  &pSchedContext->ol_rx_event_flag);
				spin_lock(&pSchedContext->ol_rx_thread_lock);
				complete(&pSchedContext->ol_suspend_rx_event);
				INIT_COMPLETION
					(pSchedContext->ol_resume_rx_event);
				spin_unlock(&pSchedContext->ol_rx_thread_lock);
				wait_for_completion_interruptible
					(&pSchedContext->ol_resume_rx_event);
			}
			break;
		}
	}

	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
		  "%s: Exiting CDS OL rx thread", __func__);
	complete_and_exit(&pSchedContext->ol_rx_shutdown, 0);
}
#endif

/**
 * cds_sched_close() - close the cds scheduler
 * @p_cds_context: Pointer to the global CDS Context
 *
 * This API closes the CDS Scheduler. Upon successful closing:
 * - All the message queues are flushed
 * - The Main Controller thread is closed
 * - The OL RX thread is closed
 *
 * Return: cdf status
 */
CDF_STATUS cds_sched_close(void *p_cds_context)
{
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: invoked", __func__);
	if (gp_cds_sched_context == NULL) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: gp_cds_sched_context == NULL", __func__);
		return CDF_STATUS_E_FAILURE;
	}
	/* shut down MC Thread */
	set_bit(MC_SHUTDOWN_EVENT_MASK, &gp_cds_sched_context->mcEventFlag);
	set_bit(MC_POST_EVENT_MASK, &gp_cds_sched_context->mcEventFlag);
	wake_up_interruptible(&gp_cds_sched_context->mcWaitQueue);
	/* Wait for MC to exit */
	wait_for_completion(&gp_cds_sched_context->McShutdown);
	gp_cds_sched_context->McThread = NULL;

	/* Clean up message queues of MC thread */
	cds_sched_flush_mc_mqs(gp_cds_sched_context);

	/* Deinit all the queues */
	cds_sched_deinit_mqs(gp_cds_sched_context);

#ifdef QCA_CONFIG_SMP
	/* Shut down Tlshim Rx thread */
	set_bit(RX_SHUTDOWN_EVENT_MASK, &gp_cds_sched_context->ol_rx_event_flag);
	set_bit(RX_POST_EVENT_MASK, &gp_cds_sched_context->ol_rx_event_flag);
	wake_up_interruptible(&gp_cds_sched_context->ol_rx_wait_queue);
	wait_for_completion_interruptible
		(&gp_cds_sched_context->ol_rx_shutdown);
	gp_cds_sched_context->ol_rx_thread = NULL;
	cds_drop_rxpkt_by_staid(gp_cds_sched_context, WLAN_MAX_STA_COUNT);
	cds_free_ol_rx_pkt_freeq(gp_cds_sched_context);
	unregister_hotcpu_notifier(&cds_cpu_hotplug_notifier);
#endif
	return CDF_STATUS_SUCCESS;
} /* cds_sched_close() */

/**
 * cds_sched_init_mqs() - initialize the cds scheduler message queues
 * @pSchedContext: Pointer to the Scheduler Context
 *
 * This API initializes the cds scheduler message queues.
 *
 * Return: CDF status
 */
CDF_STATUS cds_sched_init_mqs(p_cds_sched_context pSchedContext)
{
	CDF_STATUS vStatus = CDF_STATUS_SUCCESS;

	/* Now initialize all the message queues */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Initializing the WMA MC Message queue", __func__);
	vStatus = cds_mq_init(&pSchedContext->wmaMcMq);
	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to init WMA MC Message queue", __func__);
		CDF_ASSERT(0);
		return vStatus;
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Initializing the PE MC Message queue", __func__);
	vStatus = cds_mq_init(&pSchedContext->peMcMq);
	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to init PE MC Message queue", __func__);
		CDF_ASSERT(0);
		return vStatus;
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Initializing the SME MC Message queue", __func__);
	vStatus = cds_mq_init(&pSchedContext->smeMcMq);
	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to init SME MC Message queue", __func__);
		CDF_ASSERT(0);
		return vStatus;
	}
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: Initializing the SYS MC Message queue", __func__);
	vStatus = cds_mq_init(&pSchedContext->sysMcMq);
	if (!CDF_IS_STATUS_SUCCESS(vStatus)) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: Failed to init SYS MC Message queue", __func__);
		CDF_ASSERT(0);
		return vStatus;
	}

	return CDF_STATUS_SUCCESS;
} /* cds_sched_init_mqs() */

/**
 * cds_sched_deinit_mqs() - de-initialize the cds scheduler message queues
 * @pSchedContext: Pointer to the Scheduler Context
 *
 * Return: none
 */
void cds_sched_deinit_mqs(p_cds_sched_context pSchedContext)
{
	/* Now de-initialize all message queues */

	/* MC WMA */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: De-initializing the WMA MC Message queue", __func__);
	cds_mq_deinit(&pSchedContext->wmaMcMq);
	/* MC PE */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: De-initializing the PE MC Message queue", __func__);
	cds_mq_deinit(&pSchedContext->peMcMq);
	/* MC SME */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: De-initializing the SME MC Message queue", __func__);
	cds_mq_deinit(&pSchedContext->smeMcMq);
	/* MC SYS */
	CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO_HIGH,
		  "%s: De-initializing the SYS MC Message queue", __func__);
	cds_mq_deinit(&pSchedContext->sysMcMq);

} /* cds_sched_deinit_mqs() */

/**
 * cds_sched_flush_mc_mqs() - flush all the MC thread message queues
 * @pSchedContext: Pointer to the global CDS Sched Context
 *
 * Return: none
 */
void cds_sched_flush_mc_mqs(p_cds_sched_context pSchedContext)
{
	p_cds_msg_wrapper pMsgWrapper = NULL;
	p_cds_contextType cds_ctx;

	/* Here each of the MC thread MQ shall be drained and returned to the
	 * Core. Before returning a wrapper to the Core, the CDS message shall
	 * be freed first
	 */
	CDF_TRACE(CDF_MODULE_ID_CDF,
		  CDF_TRACE_LEVEL_INFO,
		  "Flushing the MC Thread message queue");

	if (NULL == pSchedContext) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: pSchedContext is NULL", __func__);
		return;
	}

	cds_ctx = (p_cds_contextType) (pSchedContext->pVContext);
	if (NULL == cds_ctx) {
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: cds_ctx is NULL", __func__);
		return;
	}

	/* Flush the SYS Mq */
	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->sysMcMq))) {
		CDF_TRACE(CDF_MODULE_ID_CDF,
			  CDF_TRACE_LEVEL_INFO,
			  "%s: Freeing MC SYS message type %d ", __func__,
			  pMsgWrapper->pVosMsg->type);
		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
	}
	/* Flush the WMA Mq */
	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->wmaMcMq))) {
		if (pMsgWrapper->pVosMsg != NULL) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_INFO,
				  "%s: Freeing MC WMA MSG message type %d",
				  __func__, pMsgWrapper->pVosMsg->type);
			if (pMsgWrapper->pVosMsg->bodyptr)
				cdf_mem_free((void *)pMsgWrapper->
					     pVosMsg->bodyptr);

			pMsgWrapper->pVosMsg->bodyptr = NULL;
			pMsgWrapper->pVosMsg->bodyval = 0;
			pMsgWrapper->pVosMsg->type = 0;
		}
		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
	}

	/* Flush the PE Mq */
	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->peMcMq))) {
		CDF_TRACE(CDF_MODULE_ID_CDF,
			  CDF_TRACE_LEVEL_INFO,
			  "%s: Freeing MC PE MSG message type %d", __func__,
			  pMsgWrapper->pVosMsg->type);
		pe_free_msg(cds_ctx->pMACContext,
			    (tSirMsgQ *) pMsgWrapper->pVosMsg);
		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
	}
	/* Flush the SME Mq */
	while (NULL != (pMsgWrapper = cds_mq_get(&pSchedContext->smeMcMq))) {
		CDF_TRACE(CDF_MODULE_ID_CDF,
			  CDF_TRACE_LEVEL_INFO,
			  "%s: Freeing MC SME MSG message type %d", __func__,
			  pMsgWrapper->pVosMsg->type);
		sme_free_msg(cds_ctx->pMACContext, pMsgWrapper->pVosMsg);
		cds_core_return_msg(pSchedContext->pVContext, pMsgWrapper);
	}
} /* cds_sched_flush_mc_mqs() */

/**
 * get_cds_sched_ctxt() - get the cds scheduler context
 *
 * Return: pointer to the global CDS Sched Context, or NULL if the
 *	scheduler has not been initialized
 */
p_cds_sched_context get_cds_sched_ctxt(void)
{
	/* Make sure that the CDS Scheduler context has been initialized */
	if (gp_cds_sched_context == NULL)
		CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
			  "%s: gp_cds_sched_context == NULL", __func__);

	return gp_cds_sched_context;
}

/**
 * cds_ssr_protect_init() - initialize ssr protection debug functionality
 *
 * Return: none
 */
void cds_ssr_protect_init(void)
{
	int i = 0;

	spin_lock_init(&ssr_protect_lock);

	while (i < MAX_SSR_PROTECT_LOG) {
		ssr_protect_log[i].func = NULL;
		ssr_protect_log[i].free = true;
		ssr_protect_log[i].pid = 0;
		i++;
	}
}

/**
 * cds_print_external_threads() - print external threads stuck in driver
 *
 * Return: none
 */
static void cds_print_external_threads(void)
{
	int i = 0;
	unsigned long irq_flags;

	spin_lock_irqsave(&ssr_protect_lock, irq_flags);

	while (i < MAX_SSR_PROTECT_LOG) {
		if (!ssr_protect_log[i].free) {
			CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
				  "PID %d is stuck at %s", ssr_protect_log[i].pid,
				  ssr_protect_log[i].func);
		}
		i++;
	}

	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);
}

/**
 * cds_ssr_protect() - start ssr protection
 * @caller_func: name of the calling function
 *
 * This function is called to keep track of active driver entry points.
 *
 * Return: none
 */
void cds_ssr_protect(const char *caller_func)
{
	int count;
	int i = 0;
	bool status = false;
	unsigned long irq_flags;

	count = atomic_inc_return(&ssr_protect_entry_count);

	spin_lock_irqsave(&ssr_protect_lock, irq_flags);

	while (i < MAX_SSR_PROTECT_LOG) {
		if (ssr_protect_log[i].free) {
			ssr_protect_log[i].func = caller_func;
			ssr_protect_log[i].free = false;
			ssr_protect_log[i].pid = current->pid;
			status = true;
			break;
		}
		i++;
	}

	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);

	if (!status)
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "Could not track PID %d call %s: log is full",
			  current->pid, caller_func);
}

/**
 * cds_ssr_unprotect() - stop ssr protection
 * @caller_func: name of the calling function
 *
 * Return: none
 */
void cds_ssr_unprotect(const char *caller_func)
{
	int count;
	int i = 0;
	bool status = false;
	unsigned long irq_flags;

	count = atomic_dec_return(&ssr_protect_entry_count);

	spin_lock_irqsave(&ssr_protect_lock, irq_flags);

	while (i < MAX_SSR_PROTECT_LOG) {
		if (!ssr_protect_log[i].free) {
			if ((ssr_protect_log[i].pid == current->pid) &&
			    !strcmp(ssr_protect_log[i].func, caller_func)) {
				ssr_protect_log[i].func = NULL;
				ssr_protect_log[i].free = true;
				ssr_protect_log[i].pid = 0;
				status = true;
				break;
			}
		}
		i++;
	}

	spin_unlock_irqrestore(&ssr_protect_lock, irq_flags);

	if (!status)
		CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_ERROR,
			  "Untracked call %s", caller_func);
}
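
/*
 * Usage sketch for the SSR protection pair (illustrative only;
 * hdd_example_ioctl and __hdd_example_ioctl are assumed names, not
 * driver code). Each externally reachable entry point brackets its
 * body so cds_is_ssr_ready() can account for it:
 *
 *	static int hdd_example_ioctl(struct net_device *dev)
 *	{
 *		int ret;
 *
 *		cds_ssr_protect(__func__);
 *		ret = __hdd_example_ioctl(dev);
 *		cds_ssr_unprotect(__func__);
 *
 *		return ret;
 *	}
 *
 * The unprotect call must use the same function name string as the
 * protect call, since the log entry is matched by pid and name.
 */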

/**
 * cds_is_ssr_ready() - check if the calling execution can proceed with ssr
 * @caller_func: name of the calling function
 *
 * Return: true if there are no active entry points in the driver,
 *	false if there is at least one active entry point
 */
bool cds_is_ssr_ready(const char *caller_func)
{
	int count = MAX_SSR_WAIT_ITERATIONS;

	while (count) {
		if (!atomic_read(&ssr_protect_entry_count))
			break;

		if (--count) {
			CDF_TRACE(CDF_MODULE_ID_CDF, CDF_TRACE_LEVEL_ERROR,
				  "%s: Waiting for active entry points to exit",
				  __func__);
			msleep(SSR_WAIT_SLEEP_TIME);
		}
	}
	/* at least one external thread is executing */
	if (!count) {
		cds_print_external_threads();
		return false;
	}

	CDF_TRACE(CDF_MODULE_ID_HDD, CDF_TRACE_LEVEL_INFO,
		  "Allowing SSR for %s", caller_func);

	return true;
}
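
/*
 * Usage sketch (illustrative only): before tearing the driver down for
 * a subsystem restart, the SSR path would poll until all tracked entry
 * points have exited; the wait is bounded by MAX_SSR_WAIT_ITERATIONS
 * sleeps of SSR_WAIT_SLEEP_TIME ms:
 *
 *	if (!cds_is_ssr_ready(__func__))
 *		return;	(stuck threads were already logged)
 *	(safe to proceed with shutdown and re-initialization)
 */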