/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 0;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc && drop)
		sclp_console_drop = 1;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"	/* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}


static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
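		/* Event type n is represented by the n-th most significant
		 * bit of the 32-bit event mask, hence the 1 << (32 - type)
		 * test below. */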
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
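	/* The interruption parameter carries the address of the finished
	 * SCCB in its upper bits (SCCBs are 8-byte aligned); the low-order
	 * bits flag pending event buffers. */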
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
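/*
 * Note: bit 51 of the TOD clock ticks once per microsecond, so shifting the
 * number of seconds left by 32 gives a slightly generous approximation
 * (2^32 TOD units is roughly 1.05 seconds), which is good enough for the
 * request timeout handling below.
 */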
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
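	/* Keep only the external-interrupt subclass-mask bits we still need
	 * in CR0 and make sure the service-signal subclass is enabled, so
	 * SCLP interrupts get through while we busy-wait with IRQs open. */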
	cr0_sync &= 0xffff00a0;
	cr0_sync |= 0x00000200;
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

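	/* Notify listeners without holding sclp_lock: restart the list walk
	 * after each callback, since the callback may have changed the
	 * registration list in the meantime. */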
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp_facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
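			/* Buffer is marked processed: shrink the SCCB and
			 * move the remaining event buffers up over it. */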
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
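	/* 0x2401 is the external interruption code for service signal. */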
	rc = register_external_interrupt(0x2401, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_register();
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		service_subclass_irq_unregister();
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_interrupt(0x2401, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.owner	= THIS_MODULE,
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	service_subclass_irq_register();
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);