/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER	"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 0;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc && drop)
		sclp_console_drop = 1;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

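/*
 * Example (hypothetical values) of a kernel command line using the two
 * parameters parsed above: "sclp_con_pages=32 sclp_con_drop=1" would raise
 * the console buffer pool to 32 pages and allow pages to be dropped when
 * the pool is exhausted. Note that values below SCLP_CONSOLE_PAGES are
 * silently ignored by sclp_setup_console_pages().
 */
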
static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

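/*
 * Overview of the state variables above: the driver talks to the SCLP
 * strictly one request at a time. sclp_running_state serializes the single
 * outstanding service call, sclp_reading_state ensures at most one read
 * event data request is queued, sclp_mask_state serializes WRITE_EVENT_MASK
 * requests, and sclp_init_state/sclp_activation_state/sclp_suspend_state
 * gate whether sclp_add_request() accepts new work at all. All of them are
 * protected by sclp_lock.
 */
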
/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

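/*
 * A note on the condition code handling in sclp_service_call() below: cc is
 * pre-set to 4, a value the instruction itself never produces, so a program
 * check in the servc (routed to label 2: by the EX_TABLE entries) leaves cc
 * at 4 and the function returns -EINVAL. Otherwise cc 0 means the request
 * was accepted, while cc 2 (presumably "SCLP busy") and cc 3 (presumably
 * "SCLP not operational") are mapped to -EBUSY and -EIO.
 */
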
/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc = 4; /* Initialize for program check handling */

	asm volatile(
		"0:	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"1:	ipm	%0\n"
		"	srl	%0,28\n"
		"2:\n"
		EX_TABLE(0b, 2b)
		EX_TABLE(1b, 2b)
		: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 4)
		return -EINVAL;
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}


static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

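/*
 * To summarize the retry protocol implemented above: a request that starts
 * successfully gets a SCLP_RETRY_INTERVAL (30s) watchdog; if no interrupt
 * arrives in time, sclp_request_timeout(1) forces the interface back to
 * idle and queues a NOP read event request to resynchronize. A busy
 * interface (-EBUSY) is simply retried after SCLP_BUSY_INTERVAL (10s).
 */
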
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

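/*
 * A minimal usage sketch for sclp_add_request(), assuming a caller that
 * owns a page-aligned SCCB and wants a completion callback (names prefixed
 * my_ are hypothetical; the field names are the ones used in this file):
 *
 *	static void my_done(struct sclp_req *req, void *data)
 *	{
 *		// req->status is SCLP_REQ_DONE or SCLP_REQ_FAILED here
 *	}
 *
 *	static struct sclp_req my_req;
 *
 *	my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req.sccb = my_sccb;
 *	my_req.status = SCLP_REQ_FILLED;
 *	my_req.callback = my_done;
 *	my_req.callback_data = NULL;
 *	rc = sclp_add_request(&my_req);
 *
 * The callback runs without sclp_lock held, either from the external
 * interrupt handler or from sclp_process_queue().
 */
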
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

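/*
 * The mask test above encodes the event type to mask bit convention: event
 * type n corresponds to bit (32 - n) of the (here 32-bit) sccb_mask_t, so
 * type 1 maps to the most significant bit (0x80000000). The EVTYP_*_MASK
 * constants used elsewhere (e.g. EVTYP_STATECHANGE_MASK below) follow the
 * same numbering.
 */
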
/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	/* The interruption parameter carries the address of the finished
	 * SCCB in its upper bits (SCCBs are doubleword-aligned) and
	 * event-buffer-pending indications in its low bits. */
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. Bit 51 of the TOD clock ticks
 * once per microsecond, so one second is slightly less than 2^32 TOD ticks;
 * shifting whole seconds left by 32 therefore yields a slightly generous
 * approximation of the interval. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;	/* service-signal external interrupt subclass */
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

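/*
 * Note that sclp_sync_wait() is a busy wait, not a sleep: callers in this
 * file poll driver state around it, e.g. sclp_init_mask() loops on
 *
 *	while (sclp_init_req.status != SCLP_REQ_DONE &&
 *	       sclp_init_req.status != SCLP_REQ_FAILED)
 *		sclp_sync_wait();
 *
 * which makes progress because the service-signal interrupt (the only
 * external interrupt left enabled above) drives the request state machine.
 */
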
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			/* The global masks are named from the SCLP's point
			 * of view, hence the cross-over: what a listener may
			 * send is bounded by what the SCLP can receive, and
			 * vice versa. */
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

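/*
 * Registration sketch for an event listener (names prefixed my_ are
 * hypothetical; sclp_state_change_event above is a real in-file example).
 * Each event type may have at most one owner, so overlapping masks make
 * sclp_register() fail with -EBUSY:
 *
 *	static void my_receiver(struct evbuf_header *evbuf)
 *	{
 *		// handle the event buffer
 *	}
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_MY_MASK,	// hypothetical, must be unclaimed
 *		.receiver_fn = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 */
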
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed (bit 0x80 of the event
 * buffer flags). Return the number of remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			/* Compact the SCCB by moving the remaining buffers
			 * over the processed one */
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

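/*
 * The WRITE_EVENT_MASK exchange prepared above is a two-way handshake: we
 * propose receive_mask/send_mask, and the SCLP fills in sclp_receive_mask/
 * sclp_send_mask in the same SCCB to report what it actually supports.
 * sclp_init_mask() below copies those returned values into the global masks
 * on success.
 */
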
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_interrupt(0x2401, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_interrupt(0x2401, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation.
 *
 * The pm_event_posted flag tracks which listeners have already been
 * notified, so that a rollback (e.g. a THAW issued after a failed FREEZE)
 * only revisits listeners that actually saw the original event.
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

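/*
 * How the queue flush in sclp_freeze() works: sclp_suspend_req is queued
 * with a NULL sccb, so it is never started. Once every request ahead of it
 * has completed, sclp_process_queue() takes the do_post path for it, which
 * fires sclp_suspend_req_cb() and completes sclp_request_queue_flushed. An
 * -ENODATA return from sclp_add_request() means the marker was first in
 * line, i.e. the queue was already empty, which is equally fine.
 */
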
static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.owner	= THIS_MODULE,
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_RET(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);