Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 1 | /* periodic_work.c |
| 2 | * |
Benjamin Romer | 6f14cc1 | 2015-07-16 12:40:48 -0400 | [diff] [blame] | 3 | * Copyright (C) 2010 - 2015 UNISYS CORPORATION |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 4 | * All rights reserved. |
| 5 | * |
Benjamin Romer | 6f14cc1 | 2015-07-16 12:40:48 -0400 | [diff] [blame] | 6 | * This program is free software; you can redistribute it and/or modify it |
| 7 | * under the terms and conditions of the GNU General Public License, |
| 8 | * version 2, as published by the Free Software Foundation. |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or |
| 13 | * NON INFRINGEMENT. See the GNU General Public License for more |
| 14 | * details. |
| 15 | */ |
| 16 | |
| 17 | /* |
| 18 | * Helper functions to schedule periodic work in Linux kernel mode. |
| 19 | */ |
Erik Arfvidson | d5b3f1d | 2015-05-05 18:37:04 -0400 | [diff] [blame] | 20 | #include <linux/sched.h> |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 21 | |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 22 | #include "periodic_work.h" |
| 23 | |
| 24 | #define MYDRVNAME "periodic_work" |
| 25 | |
/* State for one periodic-work instance.  Allocated by
 * visor_periodic_work_create() and released by
 * visor_periodic_work_destroy().
 */
struct periodic_work {
	rwlock_t lock;			/* protects all fields below */
	struct delayed_work work;	/* delayed work item that gets (re)queued */
	void (*workfunc)(void *);	/* caller's function, run once per period */
	void *workfuncarg;		/* opaque argument handed to workfunc */
	bool is_scheduled;		/* true while work is queued or running */
	bool want_to_stop;		/* set by visor_periodic_work_stop() to
					 * ask the work loop to wind down
					 */
	ulong jiffy_interval;		/* delay in jiffies between invocations */
	struct workqueue_struct *workqueue;	/* queue the work runs on */
	const char *devnam;		/* identifying name; stored at create time
					 * and not otherwise used in this file
					 */
};
| 37 | |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 38 | static void periodic_work_func(struct work_struct *work) |
| 39 | { |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 40 | struct periodic_work *pw; |
| 41 | |
| 42 | pw = container_of(work, struct periodic_work, work.work); |
| 43 | (*pw->workfunc)(pw->workfuncarg); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 44 | } |
| 45 | |
Erik Arfvidson | 3d05734 | 2015-11-17 13:34:58 -0500 | [diff] [blame] | 46 | struct periodic_work |
| 47 | *visor_periodic_work_create(ulong jiffy_interval, |
| 48 | struct workqueue_struct *workqueue, |
| 49 | void (*workfunc)(void *), |
| 50 | void *workfuncarg, |
| 51 | const char *devnam) |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 52 | { |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 53 | struct periodic_work *pw; |
| 54 | |
| 55 | pw = kzalloc(sizeof(*pw), GFP_KERNEL | __GFP_NORETRY); |
| 56 | if (!pw) |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 57 | return NULL; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 58 | |
| 59 | rwlock_init(&pw->lock); |
| 60 | pw->jiffy_interval = jiffy_interval; |
| 61 | pw->workqueue = workqueue; |
| 62 | pw->workfunc = workfunc; |
| 63 | pw->workfuncarg = workfuncarg; |
| 64 | pw->devnam = devnam; |
| 65 | return pw; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 66 | } |
Ken Cox | 927c792 | 2014-03-05 14:52:25 -0600 | [diff] [blame] | 67 | EXPORT_SYMBOL_GPL(visor_periodic_work_create); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 68 | |
/* Free a periodic_work instance created by visor_periodic_work_create().
 * The caller must have stopped the work first (see
 * visor_periodic_work_stop()); NULL is tolerated by kfree().
 */
void visor_periodic_work_destroy(struct periodic_work *pw)
{
	kfree(pw);
}
EXPORT_SYMBOL_GPL(visor_periodic_work_destroy);
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 74 | |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 75 | /** Call this from your periodic work worker function to schedule the next |
| 76 | * call. |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 77 | * If this function returns false, there was a failure and the |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 78 | * periodic work is no longer scheduled |
| 79 | */ |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 80 | bool visor_periodic_work_nextperiod(struct periodic_work *pw) |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 81 | { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 82 | bool rc = false; |
Masaru Nomura | e03e1e3 | 2014-05-17 21:32:53 +0100 | [diff] [blame] | 83 | |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 84 | write_lock(&pw->lock); |
| 85 | if (pw->want_to_stop) { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 86 | pw->is_scheduled = false; |
| 87 | pw->want_to_stop = false; |
| 88 | rc = true; /* yes, true; see visor_periodic_work_stop() */ |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 89 | goto unlock; |
Benjamin Romer | f84bd62 | 2015-10-01 11:52:30 -0400 | [diff] [blame] | 90 | } else if (!queue_delayed_work(pw->workqueue, &pw->work, |
| 91 | pw->jiffy_interval)) { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 92 | pw->is_scheduled = false; |
| 93 | rc = false; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 94 | goto unlock; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 95 | } |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 96 | rc = true; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 97 | unlock: |
| 98 | write_unlock(&pw->lock); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 99 | return rc; |
| 100 | } |
Ken Cox | 927c792 | 2014-03-05 14:52:25 -0600 | [diff] [blame] | 101 | EXPORT_SYMBOL_GPL(visor_periodic_work_nextperiod); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 102 | |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 103 | /** This function returns true iff new periodic work was actually started. |
| 104 | * If this function returns false, then no work was started |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 105 | * (either because it was already started, or because of a failure). |
| 106 | */ |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 107 | bool visor_periodic_work_start(struct periodic_work *pw) |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 108 | { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 109 | bool rc = false; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 110 | |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 111 | write_lock(&pw->lock); |
| 112 | if (pw->is_scheduled) { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 113 | rc = false; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 114 | goto unlock; |
Ken Cox | 61e03b4 | 2014-03-19 13:06:21 -0500 | [diff] [blame] | 115 | } |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 116 | if (pw->want_to_stop) { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 117 | rc = false; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 118 | goto unlock; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 119 | } |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 120 | INIT_DELAYED_WORK(&pw->work, &periodic_work_func); |
Benjamin Romer | f84bd62 | 2015-10-01 11:52:30 -0400 | [diff] [blame] | 121 | if (!queue_delayed_work(pw->workqueue, &pw->work, |
| 122 | pw->jiffy_interval)) { |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 123 | rc = false; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 124 | goto unlock; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 125 | } |
Prarit Bhargava | 779d075 | 2015-05-05 18:37:01 -0400 | [diff] [blame] | 126 | pw->is_scheduled = true; |
| 127 | rc = true; |
Benjamin Romer | 2c5653b | 2014-09-30 12:07:46 -0400 | [diff] [blame] | 128 | unlock: |
| 129 | write_unlock(&pw->lock); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 130 | return rc; |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 131 | } |
Ken Cox | 927c792 | 2014-03-05 14:52:25 -0600 | [diff] [blame] | 132 | EXPORT_SYMBOL_GPL(visor_periodic_work_start); |
Ken Cox | 9d9baad | 2014-03-04 07:58:05 -0600 | [diff] [blame] | 133 | |
/** This function returns true iff your call actually stopped the periodic
 * work.
 *
 * -- PAY ATTENTION... this is important --
 *
 * NO NO #1
 *
 * Do NOT call this function from some function that is running on the
 * same workqueue as the work you are trying to stop might be running
 * on!  If you violate this rule, visor_periodic_work_stop() MIGHT work,
 * but it also MIGHT get hung up in an infinite loop saying
 * "waiting for delayed work...".  This will happen if the delayed work
 * you are trying to cancel has been put in the workqueue list, but can't
 * run yet because we are running that same workqueue thread right now.
 *
 * Bottom line: If you need to call visor_periodic_work_stop() from a
 * workitem, be sure the workitem is on a DIFFERENT workqueue than the
 * workitem that you are trying to cancel.
 *
 * If I could figure out some way to check for this "no no" condition in
 * the code, I would.  It would have saved me the trouble of writing this
 * long comment.  And also, don't think this is some "theoretical" race
 * condition.  It is REAL, as I have spent the day chasing it.
 *
 * NO NO #2
 *
 * Take close note of the locks that you own when you call this function.
 * You must NOT own any locks that are needed by the periodic work
 * function that is currently installed.  If you DO, a deadlock may result,
 * because stopping the periodic work often involves waiting for the last
 * iteration of the periodic work function to complete.  Again, if you hit
 * this deadlock, you will get hung up in an infinite loop saying
 * "waiting for delayed work...".
 */
bool visor_periodic_work_stop(struct periodic_work *pw)
{
	bool stopped_something = false;

	write_lock(&pw->lock);
	/* we "stopped" the work only if it was live and not already winding
	 * down from an earlier stop request
	 */
	stopped_something = pw->is_scheduled && (!pw->want_to_stop);
	while (pw->is_scheduled) {
		pw->want_to_stop = true;
		if (cancel_delayed_work(&pw->work)) {
			/* We get here if the delayed work was pending as
			 * delayed work, but was NOT run.
			 */
			WARN_ON(!pw->is_scheduled);
			pw->is_scheduled = false;
		} else {
			/* If we get here, either the delayed work:
			 * - was run, OR,
			 * - is running RIGHT NOW on another processor, OR,
			 * - wasn't even scheduled (there is a miniscule
			 *   timing window where this could be the case)
			 * flush_workqueue() would make sure it is finished
			 * executing, but that still isn't very useful, which
			 * explains the loop...
			 */
		}
		if (pw->is_scheduled) {
			/* drop the lock so the worker can run and clear
			 * is_scheduled via visor_periodic_work_nextperiod()
			 * (which sees want_to_stop), then poll again shortly
			 */
			write_unlock(&pw->lock);
			schedule_timeout_interruptible(msecs_to_jiffies(10));
			write_lock(&pw->lock);
		} else {
			/* fully stopped; clear the request so a later
			 * visor_periodic_work_start() is allowed
			 */
			pw->want_to_stop = false;
		}
	}
	write_unlock(&pw->lock);
	return stopped_something;
}
EXPORT_SYMBOL_GPL(visor_periodic_work_stop);