/*
 * drivers/s390/cio/chsc.c
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2008
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *            Cornelia Huck (cornelia.huck@de.ibm.com)
 *            Arnd Bergmann (arndb@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/chpid.h>

#include "cio_debug.h"
static void *sei_page;
static DEFINE_SPINLOCK(siosl_lock);
static DEFINE_SPINLOCK(sda_lock);
/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
EXPORT_SYMBOL_GPL(chsc_error_from_response);
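/*
 * Most chsc commands in this file follow the same calling pattern:
 * build a request block in a zeroed 4K page allocated with GFP_DMA
 * (on s390, GFP_DMA memory lies below 2G as the hardware requires a
 * 31-bit address), issue the chsc instruction and map the result to a
 * Linux error code.  A minimal sketch of that pattern, with the
 * command-specific length and command code shown as placeholders:
 *
 *	area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 *	if (!area)
 *		return -ENOMEM;
 *	area->request.length = <length>;
 *	area->request.code = <command code>;
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		ret = (ccode == 3) ? -ENODEV : -EBUSY;
 *	else
 *		ret = chsc_error_from_response(area->response.code);
 */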
struct chsc_ssd_area {
        struct chsc_header request;
        u16 f_sch;        /* first subchannel */
        u16 l_sch;        /* last subchannel */
        struct chsc_header response;
        u8 st : 3;        /* subchannel type */
        u8 unit_addr;     /* unit address */
        u16 devno;        /* device number */
        u16 sch;          /* subchannel */
        u8 chpid[8];      /* chpids 0-7 */
        u16 fla[8];       /* full link addresses 0-7 */
} __attribute__ ((packed));
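/**
 * chsc_get_ssd_info() - store subchannel description data
 * @schid: id of the subchannel the description is requested for
 * @ssd: area to store the subchannel description in
 *
 * Issue a "store subchannel description" chsc (command code 0x0004)
 * for @schid and copy the path mask, chpids and full link addresses
 * of I/O and message subchannels to @ssd.
 */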
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
        struct chsc_ssd_area *ssd_area;

        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        ssd_area = (struct chsc_ssd_area *) page;
        ssd_area->request.length = 0x0010;
        ssd_area->request.code = 0x0004;
        ssd_area->ssid = schid.ssid;
        ssd_area->f_sch = schid.sch_no;
        ssd_area->l_sch = schid.sch_no;

        ccode = chsc(ssd_area);
        /* Check response. */
        ret = (ccode == 3) ? -ENODEV : -EBUSY;

        ret = chsc_error_from_response(ssd_area->response.code);
        CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
                      schid.ssid, schid.sch_no,
                      ssd_area->response.code);

        if (!ssd_area->sch_valid) {

        memset(ssd, 0, sizeof(struct chsc_ssd_info));
        if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
            (ssd_area->st != SUBCHANNEL_TYPE_MSG))
        ssd->path_mask = ssd_area->path_mask;
        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
        for (i = 0; i < 8; i++) {
                if (ssd_area->path_mask & mask) {
                        chp_id_init(&ssd->chpid[i]);
                        ssd->chpid[i].id = ssd_area->chpid[i];
                }
                if (ssd_area->fla_valid_mask & mask)
                        ssd->fla[i] = ssd_area->fla[i];
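/*
 * Tell the subchannel's driver that one of its channel paths has gone
 * offline.  If the driver cannot handle the CHP_OFFLINE event, the
 * subchannel is scheduled for re-evaluation on the slow path.
 */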
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
        spin_unlock_irq(sch->lock);

        spin_unlock_irq(sch->lock);
        css_schedule_eval(sch->schid);
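/**
 * chsc_chp_offline() - process a channel path that has gone offline
 * @chpid: channel-path ID of the path that is no longer available
 *
 * Inform all subchannels using @chpid that the path is gone.
 */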
void chsc_chp_offline(struct chp_id chpid)
        struct chp_link link;

        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) <= 0)
        memset(&link, 0, sizeof(struct chp_link));
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
        /*
         * We don't know the device yet, but since a path
         * may be available now to the device we'll have
         * to do recognition again.
         * Since we don't have any idea about which chpid
         * that beast may be on we'll have to do a stsch
         * on all devices, grr...
         */
        if (stsch_err(schid, &schib))

        /* Put it on the slow path. */
        css_schedule_eval(schid);

static int __s390_process_res_acc(struct subchannel *sch, void *data)
        spin_lock_irq(sch->lock);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, data, CHP_ONLINE);
        spin_unlock_irq(sch->lock);
static void s390_process_res_acc(struct chp_link *link)
        sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
                link->chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
        if (link->fla != 0) {
                sprintf(dbf_txt, "fla%x", link->fla);
                CIO_TRACE_EVENT(2, dbf_txt);

        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * I/O resources may have become accessible.
         * Scan through all subchannels that may be concerned and
         * do a validation on those.
         * The more information we have, the less scanning
         * we will have to do.
         */
        for_each_subchannel_staged(__s390_process_res_acc,
                                   s390_process_res_acc_new_sch, link);
__get_chpid_from_lir(void *data)
                /* incident-node descriptor */
                /* attached-node descriptor */
                /* incident-specific information */
        } __attribute__ ((packed)) *lir;

        /* NULL link incident record */
        if (!(lir->indesc[0] & 0xc0000000))
                /* node descriptor not valid */
        if (!(lir->indesc[0] & 0x10000000))
                /* don't handle device-type nodes - FIXME */
        /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */
        return (u16) (lir->indesc[0] & 0x000000ff);
struct chsc_sei_area {
        struct chsc_header request;
        struct chsc_header response;
        u8 vf;                          /* validity flags */
        u8 rs;                          /* reporting source */
        u8 cc;                          /* content code */
        u16 fla;                        /* full link address */
        u16 rsid;                       /* reporting source id */
        u8 ccdf[4096 - 16 - 24];        /* content-code dependent field */
        /* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));
static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
        CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
                      sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
        id = __get_chpid_from_lir(sei_area->ccdf);
                CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");

        chsc_chp_offline(chpid);

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
        struct chp_link link;

        CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
                      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
        if (sei_area->rs != 4)
        chpid.id = sei_area->rsid;
        /* allocate a new channel path structure, if needed */
        status = chp_get_status(chpid);

        memset(&link, 0, sizeof(struct chp_link));
        if ((sei_area->vf & 0xc0) != 0) {
                link.fla = sei_area->fla;
                if ((sei_area->vf & 0xc0) == 0xc0)
                        /* full link address */
                        link.fla_mask = 0xffff;
                else
                        link.fla_mask = 0xff00;

        s390_process_res_acc(&link);
struct chp_config_data {

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
        struct chp_config_data *data;
        char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

        CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
        if (sei_area->rs != 0)
        data = (struct chp_config_data *) &(sei_area->ccdf);

        for (num = 0; num <= __MAX_CHPID; num++) {
                if (!chp_test_bit(data->map, num))
                pr_notice("Processing %s for channel path %x.%02x\n",
                          events[data->op], chpid.cssid, chpid.id);
                        chp_cfg_schedule(chpid, 1);
                        chp_cfg_schedule(chpid, 0);
                        chp_cfg_cancel_deconfigure(chpid);
static void chsc_process_sei(struct chsc_sei_area *sei_area)
        /* Check if we might have lost some information. */
        if (sei_area->flags & 0x40) {
                CIO_CRW_EVENT(2, "chsc: event overflow\n");
                css_schedule_eval_all();

        /* which kind of information was stored? */
        switch (sei_area->cc) {
        case 1: /* link incident */
                chsc_process_sei_link_incident(sei_area);
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
        case 8: /* channel-path-configuration notification */
                chsc_process_sei_chp_config(sei_area);
        default: /* other stuff */
                CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
                              sei_area->cc);
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
        struct chsc_sei_area *sei_area;

        css_schedule_eval_all();

        CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
                      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
                      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
                      crw0->erc, crw0->rsid);

        /* Access to sei_page is serialized through machine check handler
         * thread, so no need for locking. */
        CIO_TRACE_EVENT(2, "prcss");
        do {
                memset(sei_area, 0, sizeof(*sei_area));
                sei_area->request.length = 0x0010;
                sei_area->request.code = 0x000e;

                if (sei_area->response.code == 0x0001) {
                        CIO_CRW_EVENT(4, "chsc: sei successful\n");
                        chsc_process_sei(sei_area);

                        CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
                                      sei_area->response.code);
        } while (sei_area->flags & 0x80);
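/**
 * chsc_chp_online() - process a channel path that has become available
 * @chpid: channel-path ID of the new path
 *
 * Trigger path verification on all subchannels that may be able to
 * use @chpid.
 */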
void chsc_chp_online(struct chp_id chpid)
        struct chp_link link;

        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);

        if (chp_get_status(chpid) != 0) {
                memset(&link, 0, sizeof(struct chp_link));
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                         struct chp_id chpid, int on)
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        spin_lock_irqsave(sch->lock, flags);
        if (sch->driver && sch->driver->chp_event)
                sch->driver->chp_event(sch, &link,
                                       on ? CHP_VARY_ON : CHP_VARY_OFF);
        spin_unlock_irqrestore(sch->lock, flags);

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 0);

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
        struct chp_id *chpid = data;

        __s390_subchannel_vary_chpid(sch, *chpid, 1);
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
        if (stsch_err(schid, &schib))

        /* Put it on the slow path. */
        css_schedule_eval(schid);
/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
        struct chp_link link;

        memset(&link, 0, sizeof(struct chp_link));
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
        /*
         * Redo PathVerification on the devices the chpid connects to.
         */
        if (on)
                for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
                                           __s390_vary_chpid_on, &link);
        else
                for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
                                           NULL, &link);
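/*
 * Helpers to add and remove the measurement-related sysfs attributes
 * for every known channel path of a channel subsystem; on a partial
 * failure in chsc_add_cmg_attr(), already created attributes are
 * removed again.
 */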
chsc_remove_cmg_attr(struct channel_subsystem *css)
        for (i = 0; i <= __MAX_CHPID; i++) {
                chp_remove_cmg_attr(css->chps[i]);

chsc_add_cmg_attr(struct channel_subsystem *css)
        for (i = 0; i <= __MAX_CHPID; i++) {
                ret = chp_add_cmg_attr(css->chps[i]);

        for (--i; i >= 0; i--) {
                chp_remove_cmg_attr(css->chps[i]);
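/*
 * Issue the secm chsc (command code 0x0016) to switch channel
 * measurement on or off: operation code 0 enables measurement using
 * the utilization blocks cub_addr1/cub_addr2, operation code 1
 * disables it (see the enable ? 0 : 1 mapping below).
 */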
int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
        struct {
                struct chsc_header request;
                u32 operation_code : 2;
                struct chsc_header response;
        } __attribute__ ((packed)) *secm_area;

        secm_area->request.length = 0x0050;
        secm_area->request.code = 0x0016;

        secm_area->key = PAGE_DEFAULT_KEY >> 4;
        secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
        secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
        secm_area->operation_code = enable ? 0 : 1;

        ccode = chsc(secm_area);
                return (ccode == 3) ? -ENODEV : -EBUSY;

        switch (secm_area->response.code) {
        ret = chsc_error_from_response(secm_area->response.code);
        CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
                      secm_area->response.code);
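/**
 * chsc_secm() - enable or disable channel measurement
 * @css: channel subsystem to operate on
 * @enable: non-zero to enable measurement, zero to disable it
 *
 * Allocate the utilization blocks if needed, issue the secm chsc and
 * create or remove the measurement sysfs attributes accordingly.
 */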
chsc_secm(struct channel_subsystem *css, int enable)
        secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        if (enable && !css->cm_enabled) {
                css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!css->cub_addr1 || !css->cub_addr2) {
                        free_page((unsigned long)css->cub_addr1);
                        free_page((unsigned long)css->cub_addr2);
                        free_page((unsigned long)secm_area);

        ret = __chsc_do_secm(css, enable, secm_area);
                css->cm_enabled = enable;
        if (css->cm_enabled) {
                ret = chsc_add_cmg_attr(css);
                        memset(secm_area, 0, PAGE_SIZE);
                        __chsc_do_secm(css, 0, secm_area);

                chsc_remove_cmg_attr(css);
        if (!css->cm_enabled) {
                free_page((unsigned long)css->cub_addr1);
                free_page((unsigned long)css->cub_addr2);

        free_page((unsigned long)secm_area);
int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
                                     struct chsc_response_struct *resp)
        struct {
                struct chsc_header request;
                struct chsc_header response;
                u8 data[PAGE_SIZE - 20];
        } __attribute__ ((packed)) *scpd_area;

        if ((rfmt == 1) && !css_general_characteristics.fcs)
        if ((rfmt == 2) && !css_general_characteristics.cib)
        scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scpd_area->request.length = 0x0010;
        scpd_area->request.code = 0x0002;

        scpd_area->cssid = chpid.cssid;
        scpd_area->first_chpid = chpid.id;
        scpd_area->last_chpid = chpid.id;
        scpd_area->fmt = fmt;
        scpd_area->rfmt = rfmt;

        ccode = chsc(scpd_area);
        ret = (ccode == 3) ? -ENODEV : -EBUSY;

        ret = chsc_error_from_response(scpd_area->response.code);
        memcpy(resp, &scpd_area->response, scpd_area->response.length);
        CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
                      scpd_area->response.code);
        free_page((unsigned long)scpd_area);

EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
                                          struct channel_path_desc *desc)
        struct chsc_response_struct *chsc_resp;

        chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
        ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
        memcpy(desc, &chsc_resp->data, sizeof(*desc));
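/*
 * Copy the measurement characteristics to the channel path, keeping
 * only the values that the cmcv mask marks as valid and clearing the
 * rest.
 */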
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
        chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
                                 GFP_KERNEL);
        if (chp->cmg_chars) {
                struct cmg_chars *cmg_chars;

                cmg_chars = chp->cmg_chars;
                for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                        mask = 0x80 >> (i + 3);
                        if (mask & cmcv)
                                cmg_chars->values[i] = chars->values[i];
                        else
                                cmg_chars->values[i] = 0;

                /* No cmg-dependent data. */
int chsc_get_channel_measurement_chars(struct channel_path *chp)
        struct {
                struct chsc_header request;
                struct chsc_header response;
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;

        scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scmc_area->request.length = 0x0010;
        scmc_area->request.code = 0x0022;

        scmc_area->first_chpid = chp->chpid.id;
        scmc_area->last_chpid = chp->chpid.id;

        ccode = chsc(scmc_area);
        ret = (ccode == 3) ? -ENODEV : -EBUSY;

        ret = chsc_error_from_response(scmc_area->response.code);
        if (!scmc_area->not_valid) {
                chp->cmg = scmc_area->cmg;
                chp->shared = scmc_area->shared;
                chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                          (struct cmg_chars *) &scmc_area->data);

        CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
                      scmc_area->response.code);

        free_page((unsigned long)scmc_area);
int __init chsc_alloc_sei_area(void)
        sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
                CIO_MSG_EVENT(0, "Can't allocate page for processing of "
                              "chsc machine checks!\n");
        ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);

void __init chsc_free_sei_area(void)
        crw_unregister_handler(CRW_RSC_CSS);
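/*
 * Enable a facility via the sda chsc (command code 0x0031); the
 * operation code selects the facility to be enabled.  Calls are
 * serialized through sda_lock.
 */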
int chsc_enable_facility(int operation_code)
        struct {
                struct chsc_header request;
                u32 operation_data_area[252];
                struct chsc_header response;
        } __attribute__ ((packed, aligned(4096))) sda_area;

        spin_lock(&sda_lock);
        memset(&sda_area, 0, sizeof(sda_area));
        sda_area.request.length = 0x0400;
        sda_area.request.code = 0x0031;
        sda_area.operation_code = operation_code;

        ret = chsc(&sda_area);
        ret = (ret == 3) ? -ENODEV : -EBUSY;

        switch (sda_area.response.code) {
        ret = chsc_error_from_response(sda_area.response.code);
        CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
                      operation_code, sda_area.response.code);
        spin_unlock(&sda_lock);
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

chsc_determine_css_characteristics(void)
        struct {
                struct chsc_header request;
                struct chsc_header response;
                u32 general_char[510];
        } __attribute__ ((packed)) *scsc_area;

        scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);

        scsc_area->request.length = 0x0010;
        scsc_area->request.code = 0x0010;

        result = chsc(scsc_area);
        result = (result == 3) ? -ENODEV : -EBUSY;

        result = chsc_error_from_response(scsc_area->response.code);
        memcpy(&css_general_characteristics, scsc_area->general_char,
               sizeof(css_general_characteristics));
        memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
               sizeof(css_chsc_characteristics));

        CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
                      scsc_area->response.code);

        free_page((unsigned long)scsc_area);
EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);
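/*
 * chsc_sstpc() and chsc_sstpi() issue the STP (Server Time Protocol)
 * control and information chsc commands on behalf of the s390 time
 * code; both expect a 4K page suitable for chsc requests in @page.
 */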
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
        struct {
                struct chsc_header request;
                unsigned int rsvd1 : 8;
                unsigned int ctrl : 16;
                unsigned int rsvd2[5];
                struct chsc_header response;
                unsigned int rsvd3[7];
        } __attribute__ ((packed)) *rr;

        memset(page, 0, PAGE_SIZE);
        rr->request.length = 0x0020;
        rr->request.code = 0x0033;
        rc = (rr->response.code == 0x0001) ? 0 : -EIO;

int chsc_sstpi(void *page, void *result, size_t size)
        struct {
                struct chsc_header request;
                unsigned int rsvd0[3];
                struct chsc_header response;
        } __attribute__ ((packed)) *rr;

        memset(page, 0, PAGE_SIZE);
        rr->request.length = 0x0010;
        rr->request.code = 0x0038;
        memcpy(result, &rr->data, size);
        return (rr->response.code == 0x0001) ? 0 : -EIO;
static struct {
        struct chsc_header request;
        struct subchannel_id sid;
        struct chsc_header response;
} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
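/**
 * chsc_siosl() - initiate logging for a subchannel
 * @schid: id of the subchannel to log
 *
 * Issue the siosl chsc (command code 0x0046) to ask the channel
 * subsystem to gather log information for @schid.
 */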
int chsc_siosl(struct subchannel_id schid)
        spin_lock_irqsave(&siosl_lock, flags);
        memset(&siosl_area, 0, sizeof(siosl_area));
        siosl_area.request.length = 0x0010;
        siosl_area.request.code = 0x0046;
        siosl_area.word1 = 0x80000000;
        siosl_area.sid = schid;

        ccode = chsc(&siosl_area);
                CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
                              schid.ssid, schid.sch_no, ccode);
        rc = chsc_error_from_response(siosl_area.response.code);
                CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
                              schid.ssid, schid.sch_no,
                              siosl_area.response.code);
                CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
                              schid.ssid, schid.sch_no);
        spin_unlock_irqrestore(&siosl_lock, flags);

EXPORT_SYMBOL_GPL(chsc_siosl);