ARM64: DTS: Add rk3399-firefly uart4 device, node as /dev/ttyS1
[firefly-linux-kernel-4.4.55.git] / drivers / platform / goldfish / goldfish_pipe.c
1 /*
2  * Copyright (C) 2011 Google, Inc.
3  * Copyright (C) 2012 Intel, Inc.
4  * Copyright (C) 2013 Intel, Inc.
5  * Copyright (C) 2014 Linaro Limited
6  *
7  * This software is licensed under the terms of the GNU General Public
8  * License version 2, as published by the Free Software Foundation, and
9  * may be copied, distributed, and modified under those terms.
10  *
11  * This program is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  * GNU General Public License for more details.
15  *
16  */
17
18 /* This source file contains the implementation of the legacy version of
19  * a goldfish pipe device driver. See goldfish_pipe_v2.c for the current
20  * version.
21  */
22 #include "goldfish_pipe.h"
23
24 /*
25  * IMPORTANT: The following constants must match the ones used and defined
26  * in external/qemu/hw/goldfish_pipe.c in the Android source tree.
27  */
28
29 /* pipe device registers */
30 #define PIPE_REG_COMMAND                0x00  /* write: value = command */
31 #define PIPE_REG_STATUS                 0x04  /* read */
32 #define PIPE_REG_CHANNEL                0x08  /* read/write: channel id */
33 #define PIPE_REG_CHANNEL_HIGH           0x30  /* read/write: channel id */
34 #define PIPE_REG_SIZE                   0x0c  /* read/write: buffer size */
35 #define PIPE_REG_ADDRESS                0x10  /* write: physical address */
36 #define PIPE_REG_ADDRESS_HIGH           0x34  /* write: physical address */
37 #define PIPE_REG_WAKES                  0x14  /* read: wake flags */
38 #define PIPE_REG_PARAMS_ADDR_LOW        0x18  /* read/write: batch data address */
39 #define PIPE_REG_PARAMS_ADDR_HIGH       0x1c  /* read/write: batch data address */
40 #define PIPE_REG_ACCESS_PARAMS          0x20  /* write: batch access */
41 #define PIPE_REG_VERSION                0x24  /* read: device version */
42
43 /* list of commands for PIPE_REG_COMMAND */
44 #define CMD_OPEN                        1  /* open new channel */
45 #define CMD_CLOSE                       2  /* close channel (from guest) */
46 #define CMD_POLL                        3  /* poll read/write status */
47
48 /* List of bitflags returned in status of CMD_POLL command */
49 #define PIPE_POLL_IN                    (1 << 0)
50 #define PIPE_POLL_OUT                   (1 << 1)
51 #define PIPE_POLL_HUP                   (1 << 2)
52
53 /* The following commands are related to write operations */
54 #define CMD_WRITE_BUFFER        4  /* send a user buffer to the emulator */
55 #define CMD_WAKE_ON_WRITE       5  /* tell the emulator to wake us when writing
56                                      is possible */
57 #define CMD_READ_BUFFER        6  /* receive a user buffer from the emulator */
58 #define CMD_WAKE_ON_READ       7  /* tell the emulator to wake us when reading
59                                    * is possible */
60
61 /* Possible status values used to signal errors - see goldfish_pipe_error_convert */
62 #define PIPE_ERROR_INVAL       -1
63 #define PIPE_ERROR_AGAIN       -2
64 #define PIPE_ERROR_NOMEM       -3
65 #define PIPE_ERROR_IO          -4
66
67 /* Bit-flags used to signal events from the emulator */
68 #define PIPE_WAKE_CLOSED       (1 << 0)  /* emulator closed pipe */
69 #define PIPE_WAKE_READ         (1 << 1)  /* pipe can now be read from */
70 #define PIPE_WAKE_WRITE        (1 << 2)  /* pipe can now be written to */
71
72 #define MAX_PAGES_TO_GRAB 32
73
74 #define DEBUG 0
75
76 #if DEBUG
77 #define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); }
78 #else
79 #define DPRINT(...)
80 #endif
81
/* This data type models a given pipe instance */
struct goldfish_pipe {
	struct goldfish_pipe_dev *dev;	/* owning device; provides MMIO base and lock */
	struct mutex lock;		/* serializes read/write/poll on this pipe */
	unsigned long flags;		/* BIT_* flags, see enum below */
	wait_queue_head_t wake_queue;	/* waiters blocked until a host wake event */
};
89
/*
 * Parameter block for a batched pipe transfer (single write to
 * PIPE_REG_ACCESS_PARAMS).  NOTE(review): the field layout presumably
 * must match the emulator's definition, like the register constants
 * above — confirm against external/qemu/hw/goldfish_pipe.c.
 */
struct access_params {
	unsigned long channel;	/* pipe identifier (guest pipe address) */
	u32 size;		/* number of bytes to transfer */
	unsigned long address;	/* physical address of the buffer */
	u32 cmd;		/* CMD_READ_BUFFER or CMD_WRITE_BUFFER */
	u32 result;		/* status written back by the emulator */
	/* reserved for future extension */
	u32 flags;
};
99
/* Bit flags for the 'flags' field of struct goldfish_pipe */
enum {
	BIT_CLOSED_ON_HOST = 0,  /* pipe closed by host */
	BIT_WAKE_ON_WRITE  = 1,  /* want to be woken on writes */
	BIT_WAKE_ON_READ   = 2,  /* want to be woken on reads */
};
106
107
108 static u32 goldfish_cmd_status(struct goldfish_pipe *pipe, u32 cmd)
109 {
110         unsigned long flags;
111         u32 status;
112         struct goldfish_pipe_dev *dev = pipe->dev;
113
114         spin_lock_irqsave(&dev->lock, flags);
115         gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
116                      dev->base + PIPE_REG_CHANNEL_HIGH);
117         writel(cmd, dev->base + PIPE_REG_COMMAND);
118         status = readl(dev->base + PIPE_REG_STATUS);
119         spin_unlock_irqrestore(&dev->lock, flags);
120         return status;
121 }
122
123 static void goldfish_cmd(struct goldfish_pipe *pipe, u32 cmd)
124 {
125         unsigned long flags;
126         struct goldfish_pipe_dev *dev = pipe->dev;
127
128         spin_lock_irqsave(&dev->lock, flags);
129         gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
130                      dev->base + PIPE_REG_CHANNEL_HIGH);
131         writel(cmd, dev->base + PIPE_REG_COMMAND);
132         spin_unlock_irqrestore(&dev->lock, flags);
133 }
134
135 /* This function converts an error code returned by the emulator through
136  * the PIPE_REG_STATUS i/o register into a valid negative errno value.
137  */
138 static int goldfish_pipe_error_convert(int status)
139 {
140         switch (status) {
141         case PIPE_ERROR_AGAIN:
142                 return -EAGAIN;
143         case PIPE_ERROR_NOMEM:
144                 return -ENOMEM;
145         case PIPE_ERROR_IO:
146                 return -EIO;
147         default:
148                 return -EINVAL;
149         }
150 }
151
152 /*
153  * Notice: QEMU will return 0 for un-known register access, indicating
154  * param_acess is supported or not
155  */
156 static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev,
157                                   struct access_params *aps)
158 {
159         u32 aph, apl;
160         u64 paddr;
161         aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
162         apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW);
163
164         paddr = ((u64)aph << 32) | apl;
165         if (paddr != (__pa(aps)))
166                 return 0;
167         return 1;
168 }
169
170 /* 0 on success */
171 static int setup_access_params_addr(struct platform_device *pdev,
172                                         struct goldfish_pipe_dev *dev)
173 {
174         u64 paddr;
175         struct access_params *aps;
176
177         aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL);
178         if (!aps)
179                 return -1;
180
181         /* FIXME */
182         paddr = __pa(aps);
183         writel((u32)(paddr >> 32), dev->base + PIPE_REG_PARAMS_ADDR_HIGH);
184         writel((u32)paddr, dev->base + PIPE_REG_PARAMS_ADDR_LOW);
185
186         if (valid_batchbuffer_addr(dev, aps)) {
187                 dev->aps = aps;
188                 return 0;
189         } else {
190                 devm_kfree(&pdev->dev, aps);
191                 return -1;
192         }
193 }
194
/* A value that will not be set by qemu emulator */
#define INITIAL_BATCH_RESULT (0xdeadbeaf)

/*
 * Attempt a transfer through the batched parameter block: fill in
 * dev->aps and kick the emulator with a single register write; the
 * host performs the whole transfer and stores its status into
 * aps->result.
 *
 * Returns 0 with *status set to the emulator's result on success, or
 * -1 when batching is unavailable or the host did not process the
 * command (the caller then falls back to per-register programming).
 * Must be called with dev->lock held, since the host reads the shared
 * aps block when PIPE_REG_ACCESS_PARAMS is written.
 */
static int access_with_param(struct goldfish_pipe_dev *dev, const int cmd,
				unsigned long address, unsigned long avail,
				struct goldfish_pipe *pipe, int *status)
{
	struct access_params *aps = dev->aps;

	if (aps == NULL)
		return -1;

	/* Seed a sentinel so we can tell whether the host ran the command */
	aps->result = INITIAL_BATCH_RESULT;
	aps->channel = (unsigned long)pipe;
	aps->size = avail;
	aps->address = address;
	aps->cmd = cmd;
	writel(cmd, dev->base + PIPE_REG_ACCESS_PARAMS);
	/*
	 * If the aps->result has not changed, that means
	 * that the batch command failed
	 */
	if (aps->result == INITIAL_BATCH_RESULT)
		return -1;
	*status = aps->result;
	return 0;
}
221
/*
 * Common implementation of read(2) and write(2) for a goldfish pipe.
 * Pins the user buffer pages with get_user_pages_fast(), hands their
 * physical addresses to the emulator chunk by chunk (preferring the
 * batched path via access_with_param()), and in blocking mode sleeps
 * on the pipe's wait queue when the host reports PIPE_ERROR_AGAIN.
 *
 * @is_write: nonzero for write(2) (guest -> host), zero for read(2).
 * Returns the number of bytes transferred, or a negative errno.
 */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
				    size_t bufflen, int is_write)
{
	unsigned long irq_flags;
	struct goldfish_pipe *pipe = filp->private_data;
	struct goldfish_pipe_dev *dev = pipe->dev;
	unsigned long address, address_end;
	struct page* pages[MAX_PAGES_TO_GRAB] = {};
	int count = 0, ret = -EINVAL;

	/* If the emulator already closed the pipe, no need to go further */
	if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
		return -EIO;

	/* Null reads or writes succeeds */
	if (unlikely(bufflen == 0))
		return 0;

	/* Check the buffer range for access */
	if (!access_ok(is_write ? VERIFY_WRITE : VERIFY_READ,
			buffer, bufflen))
		return -EFAULT;

	/* Serialize access to the pipe */
	if (mutex_lock_interruptible(&pipe->lock))
		return -ERESTARTSYS;

	address = (unsigned long)(void *)buffer;
	address_end = address + bufflen;

	/* Transfer the range in chunks of physically-contiguous pages */
	while (address < address_end) {
		unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
		unsigned long next, avail;
		int status, wakeBit, page_i, num_contiguous_pages;
		long first_page, last_page, requested_pages;
		unsigned long xaddr, xaddr_prev, xaddr_i;

		/*
		 * Attempt to grab multiple physically contiguous pages.
		 */
		first_page = address & PAGE_MASK;
		last_page = (address_end - 1) & PAGE_MASK;
		requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1;
		if (requested_pages > MAX_PAGES_TO_GRAB) {
			requested_pages = MAX_PAGES_TO_GRAB;
		}
		/* For reads the host writes the pages, so request write access */
		ret = get_user_pages_fast(first_page, requested_pages,
				!is_write, pages);

		DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__,
			ret, requested_pages, first_page);
		if (ret == 0) {
			DPRINT("%s: error: (requested pages == 0) (wanted %d)\n",
					__FUNCTION__, requested_pages);
			mutex_unlock(&pipe->lock);
			return ret;
		}
		if (ret < 0) {
			DPRINT("%s: (requested pages < 0) %d \n",
					__FUNCTION__, requested_pages);
			mutex_unlock(&pipe->lock);
			return ret;
		}

		/*
		 * Walk the pinned pages and extend the chunk as long as
		 * they are physically contiguous; stop at the first gap.
		 */
		xaddr = page_to_phys(pages[0]) | (address & ~PAGE_MASK);
		xaddr_prev = xaddr;
		num_contiguous_pages = ret == 0 ? 0 : 1;
		for (page_i = 1; page_i < ret; page_i++) {
			xaddr_i = page_to_phys(pages[page_i]) | (address & ~PAGE_MASK);
			if (xaddr_i == xaddr_prev + PAGE_SIZE) {
				page_end += PAGE_SIZE;
				xaddr_prev = xaddr_i;
				num_contiguous_pages++;
			} else {
				DPRINT("%s: discontinuous page boundary: %d pages instead\n",
						__FUNCTION__, page_i);
				break;
			}
		}
		/* Clamp the chunk to the end of the user buffer */
		next = page_end < address_end ? page_end : address_end;
		avail = next - address;

		/* Now, try to transfer the bytes in the current page */
		spin_lock_irqsave(&dev->lock, irq_flags);
		/* Batched path first; fall back to register programming */
		if (access_with_param(dev,
					is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
					xaddr, avail, pipe, &status)) {
			gf_write_ptr(pipe, dev->base + PIPE_REG_CHANNEL,
				     dev->base + PIPE_REG_CHANNEL_HIGH);
			writel(avail, dev->base + PIPE_REG_SIZE);
			gf_write_ptr((void *)xaddr,
				     dev->base + PIPE_REG_ADDRESS,
				     dev->base + PIPE_REG_ADDRESS_HIGH);
			writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
			       dev->base + PIPE_REG_COMMAND);
			status = readl(dev->base + PIPE_REG_STATUS);
		}
		spin_unlock_irqrestore(&dev->lock, irq_flags);

		/*
		 * Release all pinned pages; pages the host wrote to (read
		 * path, within the contiguous run) are marked dirty first.
		 */
		for (page_i = 0; page_i < ret; page_i++) {
			if (status > 0 && !is_write &&
				page_i < num_contiguous_pages) {
				set_page_dirty(pages[page_i]);
			}
			put_page(pages[page_i]);
		}

		if (status > 0) { /* Correct transfer */
			count += status;
			address += status;
			continue;
		} else if (status == 0) { /* EOF */
			ret = 0;
			break;
		} else if (status < 0 && count > 0) {
			/*
			 * An error occured and we already transfered
			 * something on one of the previous pages.
			 * Just return what we already copied and log this
			 * err.
			 *
			 * Note: This seems like an incorrect approach but
			 * cannot change it until we check if any user space
			 * ABI relies on this behavior.
			 */
			if (status != PIPE_ERROR_AGAIN)
				pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
						status, is_write ? "write" : "read");
			ret = 0;
			break;
		}

		/*
		 * If the error is not PIPE_ERROR_AGAIN, or if we are not in
		 * non-blocking mode, just return the error code.
		 */
		if (status != PIPE_ERROR_AGAIN ||
				(filp->f_flags & O_NONBLOCK) != 0) {
			ret = goldfish_pipe_error_convert(status);
			break;
		}

		/*
		 * The backend blocked the read/write, wait until the backend
		 * tells us it's ready to process more data.
		 */
		wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
		set_bit(wakeBit, &pipe->flags);

		/* Tell the emulator we're going to wait for a wake event */
		goldfish_cmd(pipe,
				is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);

		/* Unlock the pipe, then wait for the wake signal */
		mutex_unlock(&pipe->lock);

		/* The IRQ handler clears wakeBit and wakes us up */
		while (test_bit(wakeBit, &pipe->flags)) {
			if (wait_event_interruptible(
					pipe->wake_queue,
					!test_bit(wakeBit, &pipe->flags)))
				return -ERESTARTSYS;

			if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
				return -EIO;
		}

		/* Try to re-acquire the lock */
		if (mutex_lock_interruptible(&pipe->lock))
			return -ERESTARTSYS;
	}
	mutex_unlock(&pipe->lock);

	if (ret < 0)
		return ret;
	else
		return count;
}
399
/* read(2) handler: thin wrapper over the common transfer path */
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
			      size_t bufflen, loff_t *ppos)
{
	return goldfish_pipe_read_write(filp, buffer, bufflen, 0);
}
405
/* write(2) handler: thin wrapper over the common transfer path */
static ssize_t goldfish_pipe_write(struct file *filp,
				const char __user *buffer, size_t bufflen,
				loff_t *ppos)
{
	/* cast away const: the common path only reads the buffer when is_write */
	return goldfish_pipe_read_write(filp, (char __user *)buffer,
								bufflen, 1);
}
413
414
415 static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait)
416 {
417         struct goldfish_pipe *pipe = filp->private_data;
418         unsigned int mask = 0;
419         int status;
420
421         mutex_lock(&pipe->lock);
422
423         poll_wait(filp, &pipe->wake_queue, wait);
424
425         status = goldfish_cmd_status(pipe, CMD_POLL);
426
427         mutex_unlock(&pipe->lock);
428
429         if (status & PIPE_POLL_IN)
430                 mask |= POLLIN | POLLRDNORM;
431
432         if (status & PIPE_POLL_OUT)
433                 mask |= POLLOUT | POLLWRNORM;
434
435         if (status & PIPE_POLL_HUP)
436                 mask |= POLLHUP;
437
438         if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
439                 mask |= POLLERR;
440
441         return mask;
442 }
443
/*
 * Interrupt handler: drain the emulator's list of (channel, wake-flags)
 * pairs, clear the matching BIT_WAKE_ON_* bits and wake any sleepers.
 * Returns IRQ_HANDLED if at least one entry was processed (the IRQ line
 * is shared, so IRQ_NONE lets other handlers run).
 */
static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id)
{
	struct goldfish_pipe_dev *dev = dev_id;
	unsigned long irq_flags;
	int count = 0;

	/*
	 * We're going to read from the emulator a list of (channel,flags)
	 * pairs corresponding to the wake events that occured on each
	 * blocked pipe (i.e. channel).
	 */
	spin_lock_irqsave(&dev->lock, irq_flags);
	for (;;) {
		/* First read the channel, 0 means the end of the list */
		struct goldfish_pipe *pipe;
		unsigned long wakes;
		unsigned long channel = 0;

#ifdef CONFIG_64BIT
		channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;

		/*
		 * NOTE(review): a zero high word is treated as end-of-list
		 * before the low word is read; this assumes 64-bit kernel
		 * pipe addresses never have an all-zero top half — confirm
		 * against the device protocol.
		 */
		if (channel == 0)
			break;
#endif
		channel |= readl(dev->base + PIPE_REG_CHANNEL);

		if (channel == 0)
			break;

		/* Convert channel to struct pipe pointer + read wake flags */
		wakes = readl(dev->base + PIPE_REG_WAKES);
		pipe  = (struct goldfish_pipe *)(ptrdiff_t)channel;

		/* Did the emulator just closed a pipe? */
		if (wakes & PIPE_WAKE_CLOSED) {
			set_bit(BIT_CLOSED_ON_HOST, &pipe->flags);
			/* Wake both directions so blocked callers see EIO */
			wakes |= PIPE_WAKE_READ | PIPE_WAKE_WRITE;
		}
		if (wakes & PIPE_WAKE_READ)
			clear_bit(BIT_WAKE_ON_READ, &pipe->flags);
		if (wakes & PIPE_WAKE_WRITE)
			clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags);

		wake_up_interruptible(&pipe->wake_queue);
		count++;
	}
	spin_unlock_irqrestore(&dev->lock, irq_flags);

	return (count == 0) ? IRQ_NONE : IRQ_HANDLED;
}
494
495 /**
496  *      goldfish_pipe_open      -       open a channel to the AVD
497  *      @inode: inode of device
498  *      @file: file struct of opener
499  *
500  *      Create a new pipe link between the emulator and the use application.
501  *      Each new request produces a new pipe.
502  *
503  *      Note: we use the pipe ID as a mux. All goldfish emulations are 32bit
504  *      right now so this is fine. A move to 64bit will need this addressing
505  */
506 static int goldfish_pipe_open(struct inode *inode, struct file *file)
507 {
508         struct goldfish_pipe *pipe;
509         struct goldfish_pipe_dev *dev = pipe_dev;
510         int32_t status;
511
512         /* Allocate new pipe kernel object */
513         pipe = kzalloc(sizeof(*pipe), GFP_KERNEL);
514         if (pipe == NULL)
515                 return -ENOMEM;
516
517         pipe->dev = dev;
518         mutex_init(&pipe->lock);
519         DPRINT("%s: call. pipe_dev pipe_dev=0x%lx new_pipe_addr=0x%lx file=0x%lx\n", __FUNCTION__, pipe_dev, pipe, file);
520         // spin lock init, write head of list, i guess
521         init_waitqueue_head(&pipe->wake_queue);
522
523         /*
524          * Now, tell the emulator we're opening a new pipe. We use the
525          * pipe object's address as the channel identifier for simplicity.
526          */
527
528         status = goldfish_cmd_status(pipe, CMD_OPEN);
529         if (status < 0) {
530                 kfree(pipe);
531                 return status;
532         }
533
534         /* All is done, save the pipe into the file's private data field */
535         file->private_data = pipe;
536         return 0;
537 }
538
539 static int goldfish_pipe_release(struct inode *inode, struct file *filp)
540 {
541         struct goldfish_pipe *pipe = filp->private_data;
542
543         DPRINT("%s: call. pipe=0x%lx file=0x%lx\n", __FUNCTION__, pipe, filp);
544         /* The guest is closing the channel, so tell the emulator right now */
545         goldfish_cmd(pipe, CMD_CLOSE);
546         kfree(pipe);
547         filp->private_data = NULL;
548         return 0;
549 }
550
/* File operations for the v1 /dev/goldfish_pipe device node */
static const struct file_operations goldfish_pipe_fops = {
	.owner = THIS_MODULE,
	.read = goldfish_pipe_read,
	.write = goldfish_pipe_write,
	.poll = goldfish_pipe_poll,
	.open = goldfish_pipe_open,
	.release = goldfish_pipe_release,
};
559
/* Misc character device exposing the pipe to userspace (dynamic minor) */
static struct miscdevice goldfish_pipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "goldfish_pipe",
	.fops = &goldfish_pipe_fops,
};
565
566 int goldfish_pipe_device_init_v1(struct platform_device *pdev)
567 {
568         struct goldfish_pipe_dev *dev = pipe_dev;
569         int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt,
570                                 IRQF_SHARED, "goldfish_pipe", dev);
571         if (err) {
572                 dev_err(&pdev->dev, "unable to allocate IRQ for v1\n");
573                 return err;
574         }
575
576         err = misc_register(&goldfish_pipe_dev);
577         if (err) {
578                 dev_err(&pdev->dev, "unable to register v1 device\n");
579                 return err;
580         }
581
582         setup_access_params_addr(pdev, dev);
583         return 0;
584 }
585
586 void goldfish_pipe_device_deinit_v1(struct platform_device *pdev)
587 {
588     misc_deregister(&goldfish_pipe_dev);
589 }