/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 *
 * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
 *
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * This file was originally distributed by Qualcomm Atheros, Inc.
 * under proprietary terms before Copyright ownership was assigned
 * to the Linux Foundation.
 */

#if !defined(__I_CDF_LOCK_H)
#define __I_CDF_LOCK_H

/**
 * DOC: i_cdf_lock.h
 *
 * Linux-specific definitions for CDF Locks
 *
 */

/* Include Files */
#include <cdf_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/semaphore.h>
#include <linux/interrupt.h>
#if defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif

51
52/* define for flag */
53#define ADF_OS_LINUX_UNLOCK_BH 1
54

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * typedef struct - cdf_mutex_t
 * @m_lock: Mutex lock
 * @cookie: Lock cookie
 * @processID: Process ID to track lock
 * @state: Lock status
 * @refcount: Reference count for recursive lock
 */
typedef struct cdf_lock_s {
        struct mutex m_lock;
        uint32_t cookie;
        int processID;
        uint32_t state;
        uint8_t refcount;
} cdf_mutex_t;

/**
 * typedef struct - cdf_spinlock_t
 * @spinlock: Spin lock
 * @flags: Records whether bottom halves were disabled when the lock was taken
 * @_flags: Saved IRQ state used by the irqsave/irqrestore variants
 */
typedef struct __cdf_spinlock {
        spinlock_t spinlock;
        unsigned int flags;
        unsigned long _flags;
} cdf_spinlock_t;

typedef cdf_spinlock_t __cdf_spinlock_t;
typedef struct semaphore __cdf_semaphore_t;

#if defined CONFIG_CNSS
typedef struct wakeup_source cdf_wake_lock_t;
#elif defined(WLAN_OPEN_SOURCE) && defined(CONFIG_HAS_WAKELOCK)
typedef struct wake_lock cdf_wake_lock_t;
#else
typedef int cdf_wake_lock_t;
#endif

/* Function declarations and documentation */

/**
 * __cdf_semaphore_init() - initialize the semaphore
 * @m: Semaphore object
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS __cdf_semaphore_init(struct semaphore *m)
{
        sema_init(m, 1);
        return CDF_STATUS_SUCCESS;
}

/**
 * __cdf_semaphore_acquire() - acquire semaphore
 * @osdev: OS device handle (unused)
 * @m: Semaphore object
 *
 * Return: 0
 */
static inline int
__cdf_semaphore_acquire(cdf_device_t osdev, struct semaphore *m)
{
        down(m);
        return 0;
}

/**
 * __cdf_semaphore_release() - release semaphore
 * @osdev: OS device handle (unused)
 * @m: Semaphore object
 *
 * Return: none
 */
static inline void
__cdf_semaphore_release(cdf_device_t osdev, struct semaphore *m)
{
        up(m);
}
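
/*
 * Example (illustrative only, not part of the original file): a typical
 * init/acquire/release sequence for the semaphore wrappers above. The
 * osdev argument is only forwarded for API symmetry and is not used by
 * the Linux implementation.
 *
 *      __cdf_semaphore_t sem;
 *
 *      __cdf_semaphore_init(&sem);
 *      __cdf_semaphore_acquire(osdev, &sem);
 *      ... critical section ...
 *      __cdf_semaphore_release(osdev, &sem);
 */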

/**
 * __cdf_spinlock_init() - initialize spin lock
 * @lock: Spin lock object
 *
 * Return: CDF_STATUS_SUCCESS
 */
static inline CDF_STATUS __cdf_spinlock_init(__cdf_spinlock_t *lock)
{
        spin_lock_init(&lock->spinlock);
        lock->flags = 0;

        return CDF_STATUS_SUCCESS;
}

#define __cdf_spinlock_destroy(lock)

/**
 * __cdf_spin_lock() - acquire a spinlock (SMP) and disable preemption
 * (preemptive kernels)
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_lock(__cdf_spinlock_t *lock)
{
        spin_lock(&lock->spinlock);
}

/**
 * __cdf_spin_unlock() - release the spinlock and re-enable preemption
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock(__cdf_spinlock_t *lock)
{
        spin_unlock(&lock->spinlock);
}

/**
 * __cdf_spin_lock_irqsave() - acquire a spinlock (SMP), disable preemption
 * (preemptive kernels) and disable local IRQs, saving the previous IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_lock_irqsave(__cdf_spinlock_t *lock)
{
        spin_lock_irqsave(&lock->spinlock, lock->_flags);
}

/**
 * __cdf_spin_unlock_irqrestore() - release the spinlock, re-enable preemption
 * and restore the saved IRQ state
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock_irqrestore(__cdf_spinlock_t *lock)
{
        spin_unlock_irqrestore(&lock->spinlock, lock->_flags);
}
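
/*
 * Note (illustrative commentary, not from the original source): the
 * irqsave/irqrestore wrappers above keep the saved IRQ state in the lock
 * itself (lock->_flags) rather than in a caller-provided variable, so
 * __cdf_spin_lock_irqsave() and __cdf_spin_unlock_irqrestore() must be
 * used as a strictly paired, non-nested critical section on a given
 * __cdf_spinlock_t; the caller does not manage a flags word.
 */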

/*
 * Synchronous versions - only for OSes that support interrupt disable
 */
#define __cdf_spin_lock_irq(_pLock, _flags)   spin_lock_irqsave(_pLock, _flags)
#define __cdf_spin_unlock_irq(_pLock, _flags) spin_unlock_irqrestore(_pLock, _flags)
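
/*
 * Illustrative usage of the _irq macros (not part of the original file):
 * unlike the wrappers above, they operate on a raw spinlock_t and a
 * caller-provided flags word, e.g. for a cdf_spinlock_t "lock":
 *
 *      unsigned long flags;
 *
 *      __cdf_spin_lock_irq(&lock.spinlock, flags);
 *      ... critical section with local IRQs disabled ...
 *      __cdf_spin_unlock_irq(&lock.spinlock, flags);
 */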

/**
 * __cdf_spin_lock_bh() - acquire the spinlock and disable bottom halves
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_lock_bh(__cdf_spinlock_t *lock)
{
        if (likely(irqs_disabled() || in_softirq())) {
                /* Bottom halves cannot run here, a plain lock is enough */
                spin_lock(&lock->spinlock);
        } else {
                /* Disable bottom halves and remember to re-enable on unlock */
                spin_lock_bh(&lock->spinlock);
                lock->flags |= ADF_OS_LINUX_UNLOCK_BH;
        }
}

/**
 * __cdf_spin_unlock_bh() - release the spinlock and enable bottom halves
 * @lock: Lock object
 *
 * Return: none
 */
static inline void
__cdf_spin_unlock_bh(__cdf_spinlock_t *lock)
{
        if (unlikely(lock->flags & ADF_OS_LINUX_UNLOCK_BH)) {
                lock->flags &= ~ADF_OS_LINUX_UNLOCK_BH;
                spin_unlock_bh(&lock->spinlock);
        } else {
                spin_unlock(&lock->spinlock);
        }
}
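
/*
 * Example (illustrative, not from the original source): since the
 * ADF_OS_LINUX_UNLOCK_BH flag travels inside the lock, the _bh lock and
 * unlock calls must be strictly paired on the same cdf_spinlock_t:
 *
 *      cdf_spinlock_t lock;
 *
 *      __cdf_spinlock_init(&lock);
 *      __cdf_spin_lock_bh(&lock);
 *      ... critical section protected against bottom halves ...
 *      __cdf_spin_unlock_bh(&lock);
 */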

/**
 * __cdf_in_softirq() - check if executing in softirq context
 *
 * Return: true if in softirq context, else false
 */
static inline bool __cdf_in_softirq(void)
{
        return in_softirq();
}

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* __I_CDF_LOCK_H */