GNU Linux-libre 4.14.266-gnu1 - drivers/s390/cio/css.c
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *
 * License: GPL
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

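/*
 * Call fn() for every possible subchannel id, covering all subchannel
 * numbers in each subchannel set up to max_ssid. A non-zero return value
 * from fn() ends the scan of the current subchannel set; the last value
 * returned by fn() is passed back to the caller.
 */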
int for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

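/*
 * Walk all subchannels in two stages: fn_known is called for each
 * subchannel already registered on the css bus, fn_unknown for each
 * subchannel id without a registered device. An idset tracks which ids
 * were seen in the first stage so they are not visited twice; if the
 * idset cannot be allocated, fall back to a full scan that sorts known
 * from unknown per id.
 */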
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* Fall back to brute force scanning in case of OOM. */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}

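/*
 * Allocate and initialize a new subchannel structure for the given
 * subchannel id. Returns the refcounted subchannel on success or an
 * ERR_PTR() value; the final reference is dropped through
 * css_subchannel_release().
 */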
struct subchannel *css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0)
		goto err;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

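/*
 * Refresh the stored subchannel description: ask CHSC for the
 * subchannel's ssd info and, if that fails, derive a minimal version
 * from the PMCW. Newly seen channel paths are registered as a side
 * effect.
 */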
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

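/*
 * Make a subchannel known to the driver core: set up parent, bus and
 * attribute groups, refresh the ssd info and register the device. The
 * initial add uevent is suppressed; it is emitted here only when no
 * driver matched, so that a module may be loaded via the modalias.
 */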
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition has been successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

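/*
 * Look up the registered subchannel for @schid. On success the
 * subchannel is returned with its device reference count raised;
 * callers must drop it with put_device().
 */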
struct subchannel *get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch(schid, &schib)) {
		/* Subchannel is not provided. */
		return -ENXIO;
	}
	if (!css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

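/*
 * Evaluate a single subchannel id: known subchannels are handed to
 * their driver's sch_event callback, unknown ones are probed. On
 * -EAGAIN the id is queued for another evaluation on the slow path.
 */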
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: operation to be performed
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time for platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

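/*
 * Queue @schid for evaluation on the slow path. The work runs from
 * cio_work_q; css_eval_scheduled stays set until the slow subchannel
 * set has been drained, allowing css_complete_work() to wait for it.
 */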
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

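/*
 * Schedule evaluation of all subchannel ids that currently have no
 * registered device, after @delay jiffies. Used to (re)probe devices
 * that may have appeared, e.g. after resume or on user request.
 */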
void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			(css->cssid < 0) ? 0 : css->cssid;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (css->cssid < 0)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

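/*
 * The cm_enable attribute switches channel-path measurement on or off
 * via chsc_secm(); it is only visible when the CHSC characteristics
 * report the secm facility (see cm_enable_mode()).
 */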
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

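/*
 * Create and register the channel subsystem device for index @nr,
 * together with its "defunct" pseudo subchannel. On any failure the
 * partially set up css is torn down and channel_subsystems[nr] is
 * cleared again.
 */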
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;

	mutex_init(&css->mutex);
	css->cssid = chsc_get_cssid(nr);
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* Search for subchannels that appeared during hibernation. */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with errno=%d\n",
		 ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

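/* Let each css driver flush its pending work (e.g. device recognition). */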
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

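/*
 * Wait until all scheduled subchannel evaluation has finished and all
 * css drivers have settled. Returns -EINTR if the wait is interrupted.
 */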
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish. */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup before the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

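/*
 * Re-enable facilities and refresh channel-path descriptors, e.g. after
 * a resume from hibernation; also reactivates channel measurement.
 */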
void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (chp)
			chp_update_desc(chp);
	}
	cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
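/*
 * Writing to /proc/cio_settle waits until all pending channel report
 * words have been processed and subchannel evaluation has completed.
 */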
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRWs. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

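/*
 * A css driver matches a subchannel if the subchannel's type appears
 * in the driver's subchannel_type id table.
 */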
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register() that sets the
 * bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

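/*
 * A minimal usage sketch (illustrative only; the id table and the
 * callback set depend on the driver):
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.owner = THIS_MODULE,
 *			.name  = "example",
 *		},
 *		.subchannel_type = example_ids,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */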
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister().
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);