drivers/s390/char/vmlogrdr.c
/*
 *      character device driver for reading z/VM system service records
 *
 *
 *      Copyright IBM Corp. 2004, 2009
 *      character device driver for reading z/VM system service records,
 *      Version 1.0
 *      Author(s): Xenia Tkatschow <xenia@us.ibm.com>
 *                 Stefan Weinhuber <wein@de.ibm.com>
 *
 */

#define KMSG_COMPONENT "vmlogrdr"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <asm/uaccess.h>
#include <asm/cpcmd.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include <linux/kmod.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/string.h>

MODULE_AUTHOR
        ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n"
         "                            Stefan Weinhuber (wein@de.ibm.com)");
MODULE_DESCRIPTION ("Character device driver for reading z/VM "
                    "system service records.");
MODULE_LICENSE("GPL");


/*
 * The size of the buffer for iucv data transfer is one page,
 * but in addition to the data we read from iucv we also
 * place an integer and some characters into that buffer,
 * so the maximum size for record data is a little less than
 * one page.
 */
#define NET_BUFFER_SIZE (PAGE_SIZE - sizeof(int) - sizeof(FENCE))

/*
 * The elements that are concurrently accessed by bottom halves are
 * connection_established, iucv_path_severed, local_interrupt_buffer
 * and receive_ready. The first three can be protected by
 * priv_lock.  receive_ready is atomic, so it can be incremented and
 * decremented without holding a lock.
 * The variable dev_in_use needs to be protected by the lock, since
 * it's a flag used by open to make sure that the device is opened only
 * by one user at the same time.
 */
struct vmlogrdr_priv_t {
        char system_service[8];
        char internal_name[8];
        char recording_name[8];
        struct iucv_path *path;
        int connection_established;
        int iucv_path_severed;
        struct iucv_message local_interrupt_buffer;
        atomic_t receive_ready;
        int minor_num;
        char * buffer;
        char * current_position;
        int remaining;
        ulong residual_length;
        int buffer_free;
        int dev_in_use; /* 1: already opened, 0: not opened*/
        spinlock_t priv_lock;
        struct device  *device;
        struct device  *class_device;
        int autorecording;
        int autopurge;
};


/*
 * File operation structure for vmlogrdr devices
 */
static int vmlogrdr_open(struct inode *, struct file *);
static int vmlogrdr_release(struct inode *, struct file *);
static ssize_t vmlogrdr_read (struct file *filp, char __user *data,
                              size_t count, loff_t * ppos);

static const struct file_operations vmlogrdr_fops = {
        .owner   = THIS_MODULE,
        .open    = vmlogrdr_open,
        .release = vmlogrdr_release,
        .read    = vmlogrdr_read,
        .llseek  = no_llseek,
};


static void vmlogrdr_iucv_path_complete(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_path_severed(struct iucv_path *, u8 *ipuser);
static void vmlogrdr_iucv_message_pending(struct iucv_path *,
                                          struct iucv_message *);


static struct iucv_handler vmlogrdr_iucv_handler = {
        .path_complete   = vmlogrdr_iucv_path_complete,
        .path_severed    = vmlogrdr_iucv_path_severed,
        .message_pending = vmlogrdr_iucv_message_pending,
};


static DECLARE_WAIT_QUEUE_HEAD(conn_wait_queue);
static DECLARE_WAIT_QUEUE_HEAD(read_wait_queue);

/*
 * pointer to system service private structure
 * minor number 0 --> logrec
 * minor number 1 --> account
 * minor number 2 --> symptom
 */

static struct vmlogrdr_priv_t sys_ser[] = {
        { .system_service = "*LOGREC ",
          .internal_name  = "logrec",
          .recording_name = "EREP",
          .minor_num      = 0,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[0].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
        { .system_service = "*ACCOUNT",
          .internal_name  = "account",
          .recording_name = "ACCOUNT",
          .minor_num      = 1,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[1].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        },
        { .system_service = "*SYMPTOM",
          .internal_name  = "symptom",
          .recording_name = "SYMPTOM",
          .minor_num      = 2,
          .buffer_free    = 1,
          .priv_lock      = __SPIN_LOCK_UNLOCKED(sys_ser[2].priv_lock),
          .autorecording  = 1,
          .autopurge      = 1,
        }
};

#define MAXMINOR  (sizeof(sys_ser)/sizeof(struct vmlogrdr_priv_t))

static char FENCE[] = {"EOR"};
static int vmlogrdr_major = 0;
static struct cdev  *vmlogrdr_cdev = NULL;
static int recording_class_AB;


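/*
 * IUCV callback: the connection to the system service has been
 * established. Runs in the bottom half, so just set the flag under
 * priv_lock and wake up the opener sleeping on conn_wait_queue.
 */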
static void vmlogrdr_iucv_path_complete(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t * logptr = path->private;

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 1;
        spin_unlock(&logptr->priv_lock);
        wake_up(&conn_wait_queue);
}


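/*
 * IUCV callback: the peer severed the path. Report the reason code
 * from the IUCV user data, release the path and flag the severance so
 * that both the opener and a sleeping reader are woken up.
 */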
static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
        struct vmlogrdr_priv_t * logptr = path->private;
        u8 reason = (u8) ipuser[8];

        pr_err("vmlogrdr: connection severed with reason %i\n", reason);

        iucv_path_sever(path, NULL);
        kfree(path);
        logptr->path = NULL;

        spin_lock(&logptr->priv_lock);
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 1;
        spin_unlock(&logptr->priv_lock);

        wake_up(&conn_wait_queue);
        /* just in case we're sleeping waiting for a record */
        wake_up_interruptible(&read_wait_queue);
}


static void vmlogrdr_iucv_message_pending(struct iucv_path *path,
                                          struct iucv_message *msg)
{
        struct vmlogrdr_priv_t * logptr = path->private;

        /*
         * This function is the bottom half so it should be quick.
         * Copy the external interrupt data into our local eib and increment
         * the usage count
         */
        spin_lock(&logptr->priv_lock);
        memcpy(&logptr->local_interrupt_buffer, msg, sizeof(*msg));
        atomic_inc(&logptr->receive_ready);
        spin_unlock(&logptr->priv_lock);
        wake_up_interruptible(&read_wait_queue);
}


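/*
 * Issue "QUERY COMMAND RECORDING" and parse the CP response: return 1
 * if the answer is ANY or the listed privilege classes include A or B,
 * 0 otherwise. The result decides whether the RECORDING commands are
 * issued with the QID operand.
 */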
static int vmlogrdr_get_recording_class_AB(void)
{
        static const char cp_command[] = "QUERY COMMAND RECORDING ";
        char cp_response[80];
        char *tail;
        int len,i;

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        len = strnlen(cp_response,sizeof(cp_response));
        // now the parsing
        tail=strnchr(cp_response,len,'=');
        if (!tail)
                return 0;
        tail++;
        if (!strncmp("ANY",tail,3))
                return 1;
        if (!strncmp("NONE",tail,4))
                return 0;
        /*
         * expect comma separated list of classes here, if one of them
         * is A or B return 1 otherwise 0
         */
        for (i=tail-cp_response; i<len; i++)
                if ( cp_response[i]=='A' || cp_response[i]=='B' )
                        return 1;
        return 0;
}


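/*
 * Switch CP recording for this service on (action == 1) or off via the
 * RECORDING command. If requested, queued records are purged in a
 * separate RECORDING ... PURGE command: before switching on, afterwards
 * when switching off. Returns 0 if CP answers with 'Command complete',
 * -EIO otherwise.
 */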
static int vmlogrdr_recording(struct vmlogrdr_priv_t * logptr,
                              int action, int purge)
{

        char cp_command[80];
        char cp_response[160];
        char *onoff, *qid_string;
        int rc;

        onoff = ((action == 1) ? "ON" : "OFF");
        qid_string = ((recording_class_AB == 1) ? " QID * " : "");

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Purging has to be done as a separate step, because recording
         * can't be switched on as long as records are on the queue.
         * Doing both at the same time doesn't work.
         */
        if (purge && (action == 1)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));
        snprintf(cp_command, sizeof(cp_command), "RECORDING %s %s %s",
                logptr->recording_name,
                onoff,
                qid_string);
        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        /* The recording command will usually answer with 'Command complete'
         * on success, but when the specific service was never connected
         * before then there might be an additional informational message
         * 'HCPCRC8072I Recording entry not found' before the
         * 'Command complete'. So I use strstr rather than strncmp.
         */
        if (strstr(cp_response,"Command complete"))
                rc = 0;
        else
                rc = -EIO;
        /*
         * If we turn recording off, we have to purge any remaining records
         * afterwards, as a large number of queued records may impact z/VM
         * performance.
         */
        if (purge && (action == 0)) {
                memset(cp_command, 0x00, sizeof(cp_command));
                memset(cp_response, 0x00, sizeof(cp_response));
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE %s",
                         logptr->recording_name,
                         qid_string);
                cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);
        }

        return rc;
}


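/*
 * Open a vmlogrdr device: only blocking reads are supported and only
 * one opener at a time. Optionally start recording, then allocate an
 * IUCV path, connect to the system service and sleep until the path is
 * either established or severed.
 */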
static int vmlogrdr_open (struct inode *inode, struct file *filp)
{
        int dev_num = 0;
        struct vmlogrdr_priv_t * logptr = NULL;
        int connect_rc = 0;
        int ret;

        dev_num = iminor(inode);
        if (dev_num >= MAXMINOR)
                return -ENODEV;
        logptr = &sys_ser[dev_num];

        /*
         * only allow the device to be opened for blocking reads
         */
        if (filp->f_flags & O_NONBLOCK)
                return -EOPNOTSUPP;

        /* Be sure this device hasn't already been opened */
        spin_lock_bh(&logptr->priv_lock);
        if (logptr->dev_in_use) {
                spin_unlock_bh(&logptr->priv_lock);
                return -EBUSY;
        }
        logptr->dev_in_use = 1;
        logptr->connection_established = 0;
        logptr->iucv_path_severed = 0;
        atomic_set(&logptr->receive_ready, 0);
        logptr->buffer_free = 1;
        spin_unlock_bh(&logptr->priv_lock);

        /* set the file options */
        filp->private_data = logptr;

        /* start recording for this service */
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr,1,logptr->autopurge);
                if (ret)
                        pr_warning("vmlogrdr: failed to start "
                                   "recording automatically\n");
        }

        /* create connection to the system service */
        logptr->path = iucv_path_alloc(10, 0, GFP_KERNEL);
        if (!logptr->path)
                goto out_dev;
        connect_rc = iucv_path_connect(logptr->path, &vmlogrdr_iucv_handler,
                                       logptr->system_service, NULL, NULL,
                                       logptr);
        if (connect_rc) {
                pr_err("vmlogrdr: iucv connection to %s "
                       "failed with rc %i \n",
                       logptr->system_service, connect_rc);
                goto out_path;
        }

        /* We've issued the connect and now we must wait for a
         * ConnectionComplete or ConnectionSevered interrupt
         * before we can continue to process.
         */
        wait_event(conn_wait_queue, (logptr->connection_established)
                   || (logptr->iucv_path_severed));
        if (logptr->iucv_path_severed)
                goto out_record;
        nonseekable_open(inode, filp);
        return 0;

out_record:
        if (logptr->autorecording)
                vmlogrdr_recording(logptr,0,logptr->autopurge);
out_path:
        kfree(logptr->path);    /* kfree(NULL) is ok. */
        logptr->path = NULL;
out_dev:
        logptr->dev_in_use = 0;
        return -EIO;
}


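/*
 * Release: sever and free the IUCV path, optionally stop recording and
 * mark the device as no longer in use.
 */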
static int vmlogrdr_release (struct inode *inode, struct file *filp)
{
        int ret;

        struct vmlogrdr_priv_t * logptr = filp->private_data;

        iucv_path_sever(logptr->path, NULL);
        kfree(logptr->path);
        logptr->path = NULL;
        if (logptr->autorecording) {
                ret = vmlogrdr_recording(logptr,0,logptr->autopurge);
                if (ret)
                        pr_warning("vmlogrdr: failed to stop "
                                   "recording automatically\n");
        }
        logptr->dev_in_use = 0;

        return 0;
}


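/*
 * Fetch the next chunk of record data from IUCV into priv->buffer.
 * A new record is prefixed with an int holding the record length plus
 * the size of FENCE; once the whole record has arrived, the FENCE
 * string is appended. An IUCV rc of 5 (message larger than the buffer)
 * is treated as success, the rest is picked up later via
 * residual_length. Returns 0 if data was placed in the buffer,
 * nonzero otherwise.
 */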
static int vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv)
{
        int rc, *temp;
        /* we need to keep track of two data sizes here:
         * The number of bytes we need to receive from iucv and
         * the total number of bytes we actually write into the buffer.
         */
        int user_data_count, iucv_data_count;
        char * buffer;

        if (atomic_read(&priv->receive_ready)) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->residual_length){
                        /* receive second half of a record */
                        iucv_data_count = priv->residual_length;
                        user_data_count = 0;
                        buffer = priv->buffer;
                } else {
                        /* receive a new record:
                         * We need to return the total length of the record
                         * + size of FENCE in the first 4 bytes of the buffer.
                         */
                        iucv_data_count = priv->local_interrupt_buffer.length;
                        user_data_count = sizeof(int);
                        temp = (int*)priv->buffer;
                        *temp= iucv_data_count + sizeof(FENCE);
                        buffer = priv->buffer + sizeof(int);
                }
                /*
                 * If the record is bigger than our buffer, we receive only
                 * a part of it. We can get the rest later.
                 */
                if (iucv_data_count > NET_BUFFER_SIZE)
                        iucv_data_count = NET_BUFFER_SIZE;
                rc = iucv_message_receive(priv->path,
                                          &priv->local_interrupt_buffer,
                                          0, buffer, iucv_data_count,
                                          &priv->residual_length);
                spin_unlock_bh(&priv->priv_lock);
                /* An rc of 5 indicates that the record was bigger than
                 * the buffer, which is OK for us. A 9 indicates that the
                 * record was purged before we could receive it.
                 */
                if (rc == 5)
                        rc = 0;
                if (rc == 9)
                        atomic_set(&priv->receive_ready, 0);
        } else {
                rc = 1;
        }
        if (!rc) {
                priv->buffer_free = 0;
                user_data_count += iucv_data_count;
                priv->current_position = priv->buffer;
                if (priv->residual_length == 0){
                        /* the whole record has been captured,
                         * now add the fence */
                        atomic_dec(&priv->receive_ready);
                        buffer = priv->buffer + user_data_count;
                        memcpy(buffer, FENCE, sizeof(FENCE));
                        user_data_count += sizeof(FENCE);
                }
                priv->remaining = user_data_count;
        }

        return rc;
}


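/*
 * Read from the record buffer. If the buffer is free, receive new data
 * first, sleeping interruptibly until a record is pending. At most the
 * remaining buffered bytes are copied per call; once everything has
 * been handed to user space the buffer is marked free again.
 */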
static ssize_t vmlogrdr_read(struct file *filp, char __user *data,
                             size_t count, loff_t * ppos)
{
        int rc;
        struct vmlogrdr_priv_t * priv = filp->private_data;

        while (priv->buffer_free) {
                rc = vmlogrdr_receive_data(priv);
                if (rc) {
                        rc = wait_event_interruptible(read_wait_queue,
                                        atomic_read(&priv->receive_ready));
                        if (rc)
                                return rc;
                }
        }
        /* copy only up to end of record */
        if (count > priv->remaining)
                count = priv->remaining;

        if (copy_to_user(data, priv->current_position, count))
                return -EFAULT;

        *ppos += count;
        priv->current_position += count;
        priv->remaining -= count;

        /* if all data has been transferred, set buffer free */
        if (priv->remaining == 0)
                priv->buffer_free = 1;

        return count;
}

static ssize_t vmlogrdr_autopurge_store(struct device * dev,
                                        struct device_attribute *attr,
                                        const char * buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autopurge=0;
                break;
        case '1':
                priv->autopurge=1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}


static ssize_t vmlogrdr_autopurge_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autopurge);
}


static DEVICE_ATTR(autopurge, 0644, vmlogrdr_autopurge_show,
                   vmlogrdr_autopurge_store);


static ssize_t vmlogrdr_purge_store(struct device * dev,
                                    struct device_attribute *attr,
                                    const char * buf, size_t count)
{

        char cp_command[80];
        char cp_response[80];
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        if (buf[0] != '1')
                return -EINVAL;

        memset(cp_command, 0x00, sizeof(cp_command));
        memset(cp_response, 0x00, sizeof(cp_response));

        /*
         * The recording command needs to be called with option QID
         * for guests that have privilege classes A or B.
         * Other guests will not recognize the command and we have to
         * issue the same command without the QID parameter.
         */

        if (recording_class_AB)
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE QID * ",
                         priv->recording_name);
        else
                snprintf(cp_command, sizeof(cp_command),
                         "RECORDING %s PURGE ",
                         priv->recording_name);

        cpcmd(cp_command, cp_response, sizeof(cp_response), NULL);

        return count;
}


static DEVICE_ATTR(purge, 0200, NULL, vmlogrdr_purge_store);


static ssize_t vmlogrdr_autorecording_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret = count;

        switch (buf[0]) {
        case '0':
                priv->autorecording=0;
                break;
        case '1':
                priv->autorecording=1;
                break;
        default:
                ret = -EINVAL;
        }
        return ret;
}


static ssize_t vmlogrdr_autorecording_show(struct device *dev,
                                           struct device_attribute *attr,
                                           char *buf)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        return sprintf(buf, "%u\n", priv->autorecording);
}


static DEVICE_ATTR(autorecording, 0644, vmlogrdr_autorecording_show,
                   vmlogrdr_autorecording_store);


static ssize_t vmlogrdr_recording_store(struct device * dev,
                                        struct device_attribute *attr,
                                        const char * buf, size_t count)
{
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
        ssize_t ret;

        switch (buf[0]) {
        case '0':
                ret = vmlogrdr_recording(priv,0,0);
                break;
        case '1':
                ret = vmlogrdr_recording(priv,1,0);
                break;
        default:
                ret = -EINVAL;
        }
        if (ret)
                return ret;
        else
                return count;

}


static DEVICE_ATTR(recording, 0200, NULL, vmlogrdr_recording_store);


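/*
 * Driver attribute: expose the raw CP response to "QUERY RECORDING"
 * in sysfs.
 */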
static ssize_t vmlogrdr_recording_status_show(struct device_driver *driver,
                                              char *buf)
{

        static const char cp_command[] = "QUERY RECORDING ";
        int len;

        cpcmd(cp_command, buf, 4096, NULL);
        len = strlen(buf);
        return len;
}
static DRIVER_ATTR(recording_status, 0444, vmlogrdr_recording_status_show,
                   NULL);
static struct attribute *vmlogrdr_drv_attrs[] = {
        &driver_attr_recording_status.attr,
        NULL,
};
static struct attribute_group vmlogrdr_drv_attr_group = {
        .attrs = vmlogrdr_drv_attrs,
};
static const struct attribute_group *vmlogrdr_drv_attr_groups[] = {
        &vmlogrdr_drv_attr_group,
        NULL,
};

static struct attribute *vmlogrdr_attrs[] = {
        &dev_attr_autopurge.attr,
        &dev_attr_purge.attr,
        &dev_attr_autorecording.attr,
        &dev_attr_recording.attr,
        NULL,
};
static struct attribute_group vmlogrdr_attr_group = {
        .attrs = vmlogrdr_attrs,
};
static const struct attribute_group *vmlogrdr_attr_groups[] = {
        &vmlogrdr_attr_group,
        NULL,
};

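/* Power management: refuse to suspend while the device is open. */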
static int vmlogrdr_pm_prepare(struct device *dev)
{
        int rc;
        struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);

        rc = 0;
        if (priv) {
                spin_lock_bh(&priv->priv_lock);
                if (priv->dev_in_use)
                        rc = -EBUSY;
                spin_unlock_bh(&priv->priv_lock);
        }
        if (rc)
                pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n",
                       dev_name(dev));
        return rc;
}


static const struct dev_pm_ops vmlogrdr_pm_ops = {
        .prepare = vmlogrdr_pm_prepare,
};

static struct class *vmlogrdr_class;
static struct device_driver vmlogrdr_driver = {
        .name = "vmlogrdr",
        .bus  = &iucv_bus,
        .pm = &vmlogrdr_pm_ops,
        .groups = vmlogrdr_drv_attr_groups,
};

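/*
 * Register the IUCV handler, the driver on the IUCV bus and the
 * "vmlogrdr" device class, unwinding in reverse order on failure.
 */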
static int vmlogrdr_register_driver(void)
{
        int ret;

        /* Register with iucv driver */
        ret = iucv_register(&vmlogrdr_iucv_handler, 1);
        if (ret)
                goto out;

        ret = driver_register(&vmlogrdr_driver);
        if (ret)
                goto out_iucv;

        vmlogrdr_class = class_create(THIS_MODULE, "vmlogrdr");
        if (IS_ERR(vmlogrdr_class)) {
                ret = PTR_ERR(vmlogrdr_class);
                vmlogrdr_class = NULL;
                goto out_driver;
        }
        return 0;

out_driver:
        driver_unregister(&vmlogrdr_driver);
out_iucv:
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
out:
        return ret;
}


static void vmlogrdr_unregister_driver(void)
{
        class_destroy(vmlogrdr_class);
        vmlogrdr_class = NULL;
        driver_unregister(&vmlogrdr_driver);
        iucv_unregister(&vmlogrdr_iucv_handler, 1);
}


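/*
 * Create the struct device on the IUCV bus for one system service and
 * the matching class device (major vmlogrdr_major, minor
 * priv->minor_num) for the character device node.
 */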
static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
{
        struct device *dev;
        int ret;

        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (dev) {
                dev_set_name(dev, "%s", priv->internal_name);
                dev->bus = &iucv_bus;
                dev->parent = iucv_root;
                dev->driver = &vmlogrdr_driver;
                dev->groups = vmlogrdr_attr_groups;
                dev_set_drvdata(dev, priv);
                /*
                 * The release function could be called after the
                 * module has been unloaded. Its _only_ task is to
                 * free the struct. Therefore, we specify kfree()
                 * directly here. (Probably a little bit obfuscating
                 * but legitimate ...).
                 */
                dev->release = (void (*)(struct device *))kfree;
        } else
                return -ENOMEM;
        ret = device_register(dev);
        if (ret) {
                put_device(dev);
                return ret;
        }

        priv->class_device = device_create(vmlogrdr_class, dev,
                                           MKDEV(vmlogrdr_major,
                                                 priv->minor_num),
                                           priv, "%s", dev_name(dev));
        if (IS_ERR(priv->class_device)) {
                ret = PTR_ERR(priv->class_device);
                priv->class_device=NULL;
                device_unregister(dev);
                return ret;
        }
        priv->device = dev;
        return 0;
}


static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
        device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
        if (priv->device != NULL) {
                device_unregister(priv->device);
                priv->device=NULL;
        }
        return 0;
}


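/*
 * Allocate and add the character device covering all MAXMINOR minors.
 * If cdev_add() fails, the cdev is not fully registered yet, so its
 * kobject is dropped instead of calling cdev_del().
 */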
static int vmlogrdr_register_cdev(dev_t dev)
{
        int rc = 0;
        vmlogrdr_cdev = cdev_alloc();
        if (!vmlogrdr_cdev) {
                return -ENOMEM;
        }
        vmlogrdr_cdev->owner = THIS_MODULE;
        vmlogrdr_cdev->ops = &vmlogrdr_fops;
        vmlogrdr_cdev->dev = dev;
        rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR);
        if (!rc)
                return 0;

        // cleanup: cdev is not fully registered, no cdev_del here!
        kobject_put(&vmlogrdr_cdev->kobj);
        vmlogrdr_cdev=NULL;
        return rc;
}


static void vmlogrdr_cleanup(void)
{
        int i;

        if (vmlogrdr_cdev) {
                cdev_del(vmlogrdr_cdev);
                vmlogrdr_cdev=NULL;
        }
        for (i=0; i < MAXMINOR; ++i ) {
                vmlogrdr_unregister_device(&sys_ser[i]);
                free_page((unsigned long)sys_ser[i].buffer);
        }
        vmlogrdr_unregister_driver();
        if (vmlogrdr_major) {
                unregister_chrdev_region(MKDEV(vmlogrdr_major, 0), MAXMINOR);
                vmlogrdr_major=0;
        }
}


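/*
 * Module initialization: refuse to load when not running under z/VM,
 * determine the recording privilege class, allocate the chrdev region,
 * register the driver, the per-service devices and their buffers, and
 * finally the character device. Any failure is unwound through
 * vmlogrdr_cleanup().
 */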
static int __init vmlogrdr_init(void)
{
        int rc;
        int i;
        dev_t dev;

        if (! MACHINE_IS_VM) {
                pr_err("not running under VM, driver not loaded.\n");
                return -ENODEV;
        }

        recording_class_AB = vmlogrdr_get_recording_class_AB();

        rc = alloc_chrdev_region(&dev, 0, MAXMINOR, "vmlogrdr");
        if (rc)
                return rc;
        vmlogrdr_major = MAJOR(dev);

        rc=vmlogrdr_register_driver();
        if (rc)
                goto cleanup;

        for (i=0; i < MAXMINOR; ++i ) {
                sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
                if (!sys_ser[i].buffer) {
                        rc = -ENOMEM;
                        break;
                }
                sys_ser[i].current_position = sys_ser[i].buffer;
                rc=vmlogrdr_register_device(&sys_ser[i]);
                if (rc)
                        break;
        }
        if (rc)
                goto cleanup;

        rc = vmlogrdr_register_cdev(dev);
        if (rc)
                goto cleanup;
        return 0;

cleanup:
        vmlogrdr_cleanup();
        return rc;
}


static void __exit vmlogrdr_exit(void)
{
        vmlogrdr_cleanup();
        return;
}


module_init(vmlogrdr_init);
module_exit(vmlogrdr_exit);