/*
 * S/390 common I/O routines -- channel subsystem call
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Ingo Adlung (adlung@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

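/*
 * Usage sketch (illustrative, not part of the original file): the
 * request/response areas below all funnel their response codes through
 * chsc_error_from_response() in roughly this pattern, where "area"
 * stands for any of the command blocks defined in this file:
 *
 *	ccode = chsc(area);
 *	if (ccode > 0)
 *		return (ccode == 3) ? -ENODEV : -EBUSY;
 *	ret = chsc_error_from_response(area->response.code);
 *	if (ret)
 *		CIO_CRW_EVENT(2, "chsc: request failed (rc=%04x)\n",
 *			      area->response.code);
 */
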
struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

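/*
 * Note (added for illustration): struct chsc_header, defined in
 * <asm/chsc.h>, is the common prefix of every command block in this
 * file -- a 16-bit total length of the request or response area
 * followed by a 16-bit command or response code:
 *
 *	struct chsc_header {
 *		u16 length;
 *		u16 code;
 *	} __attribute__ ((packed));
 *
 * This is why each function below starts by setting request.length and
 * request.code before issuing chsc().
 */
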
int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

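/*
 * Usage sketch (hypothetical caller, for illustration): subchannel
 * recognition fetches the SSD block and then inspects the installed
 * paths, e.g.:
 *
 *	struct chsc_ssd_info ssd;
 *
 *	if (chsc_get_ssd_info(schid, &ssd) == 0 &&
 *	    (ssd.path_mask & 0x80))
 *		... the first path (ssd.chpid[0]) is installed ...
 */
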
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

static int s390_process_res_acc_new_sch(struct subchannel_id schid, void *data)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea which chpid that
	 * beast may be on, we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have, the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc,
				   s390_process_res_acc_new_sch, link);
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

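/*
 * Note (added for illustration): the notification-type mask uses the
 * architecture's left-to-right bit numbering, so NT0 is bit 0 -- the
 * most significant bit of the u64 -- and NT2 is bit 2:
 *
 *	u64 ntsm = CHSC_SEI_NT0 | CHSC_SEI_NT2;
 *
 * evaluates to 0xa000000000000000.
 */
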
struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8  nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}

static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	do {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei->response.code);
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}
	} while (sei->u.nt0_area.flags & 0x80);
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through the machine check
 * handler thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

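/*
 * Note (added for illustration): chsc_process_crw() is never called
 * directly; chsc_init() below registers it as the handler for the
 * channel-subsystem reporting source:
 *
 *	crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
 */
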
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
	}
}

static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

static int
__s390_vary_chpid_on(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;
	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to.
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   __s390_vary_chpid_on, &chpid);
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

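/*
 * Usage sketch (hypothetical caller, for illustration): the sysfs vary
 * attribute of a channel path ultimately ends up here:
 *
 *	rc = chsc_chp_vary(chpid, 1);	vary the path logically online
 *	rc = chsc_chp_vary(chpid, 0);	vary the path logically offline
 */
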
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i;
	int ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

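/*
 * Worked example (added for illustration): the validity bits for the
 * measurement characteristics start at bit 3 of cmcv, so
 * mask = 0x80 >> (i + 3) yields 0x10 for value 0, 0x08 for value 1,
 * and so on down to 0x01 for the last one; characteristics whose
 * validity bit is clear are zeroed rather than left stale.
 */
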
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;

	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

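/*
 * Usage sketch (for illustration; CHSC_SDA_OC_MSS is defined in
 * "chsc.h"): the css initialization code uses this helper to enable
 * the multiple-subchannel-set facility:
 *
 *	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
 */
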
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

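/*
 * Usage sketch (hypothetical, for illustration): the STP code drives
 * these two helpers with a page it owns -- first set the timing
 * controls, then read back the timing information:
 *
 *	rc = chsc_sstpc(page, op, ctrl);
 *	if (!rc)
 *		rc = chsc_sstpi(page, &stpi_data, sizeof(stpi_data));
 *
 * where "stpi_data" stands for whatever result buffer the caller uses.
 */
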
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		rc = (ccode == 3) ? -ENODEV : -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

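/*
 * Usage sketch (hypothetical caller, for illustration): a driver that
 * wants the channel subsystem to collect logs for a misbehaving device
 * can trigger that through the device's subchannel:
 *
 *	rc = chsc_siosl(sch->schid);
 */
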
/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;

	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);

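/*
 * Usage sketch (for illustration; the restok field is the response
 * continuation token in struct chsc_scm_info): callers are expected to
 * iterate until the token comes back as zero:
 *
 *	u64 token = 0;
 *
 *	do {
 *		ret = chsc_scm_info(scm_area, token);
 *		if (ret)
 *			break;
 *		... consume scm_area ...
 *		token = scm_area->restok;
 *	} while (token);
 */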