diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 59ac71c4a04352d055f40435444e7ccc46debf31..5a296161d843baa5eda5282e15b198e6a6f69feb 100644
@@ -159,6 +159,38 @@ static int traverse_pages(unsigned nelem, size_t size,
        return ret;
 }
 
+/*
+ * Similar to traverse_pages(), but uses each page as a "block" of
+ * data to be processed as one unit.
+ */
+static int traverse_pages_block(unsigned nelem, size_t size,
+                               struct list_head *pos,
+                               int (*fn)(void *data, int nr, void *state),
+                               void *state)
+{
+       void *pagedata;
+       int ret = 0;
+
+       BUG_ON(size > PAGE_SIZE);
+
+       while (nelem) {
+               int nr = PAGE_SIZE / size;
+               struct page *page;
+
+               if (nr > nelem)
+                       nr = nelem;
+               pos = pos->next;
+               page = list_entry(pos, struct page, lru);
+               pagedata = page_address(page);
+               ret = (*fn)(pagedata, nr, state);
+               if (ret)
+                       break;
+               nelem -= nr;
+       }
+
+       return ret;
+}
+
 struct mmap_mfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
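
The interesting property of traverse_pages_block() is the tail handling: every
iteration hands the callback at most PAGE_SIZE/size elements, and the final
iteration hands whatever remains. A minimal userspace sketch of the same walk
(process_block() and the malloc'd buffers are hypothetical stand-ins for the
kernel callback and page_address(); PAGE_SIZE is assumed to be 4096):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096 /* assumed page size for the demo */

/* Hypothetical stand-in for the fn() callback: handles one block of
 * nr elements that all live inside a single page. */
static int process_block(void *data, int nr, void *state)
{
        int *total = state;

        *total += nr;
        printf("block of %4d elements at %p\n", nr, data);
        return 0; /* non-zero would abort the walk, as in the kernel */
}

int main(void)
{
        size_t size = sizeof(unsigned long); /* element size, e.g. xen_pfn_t */
        unsigned nelem = 2500;               /* deliberately not a multiple */
        int total = 0;

        /* Same loop shape as traverse_pages_block(). */
        while (nelem) {
                int nr = PAGE_SIZE / size;
                void *pagedata = malloc(PAGE_SIZE); /* page_address() stand-in */

                if (!pagedata)
                        return 1;
                if (nr > (int)nelem)
                        nr = nelem;
                if (process_block(pagedata, nr, &total)) {
                        free(pagedata);
                        break;
                }
                free(pagedata);
                nelem -= nr;
        }
        printf("processed %d elements\n", total);
        return 0;
}

With 8-byte elements this yields four full blocks of 512 followed by one
tail block of 452, covering all 2500 elements.
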
@@ -274,39 +306,25 @@ struct mmap_batch_state {
 /* Note for auto-translated dom0: if the domU being created is PV, the mfn
  * is a real mfn (a bus address); if it is auto-translated, the mfn is
  * actually a pfn (the input to HAP).
  */
-static int mmap_batch_fn(void *data, void *state)
+static int mmap_batch_fn(void *data, int nr, void *state)
 {
        xen_pfn_t *mfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
-       struct page *cur_page = NULL;
+       struct page **cur_pages = NULL;
        int ret;
 
        if (xen_feature(XENFEAT_auto_translated_physmap))
-               cur_page = pages[st->index++];
+               cur_pages = &pages[st->index];
 
-       ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                        st->vma->vm_page_prot, st->domain,
-                                        &cur_page);
+       BUG_ON(nr < 0);
+       ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
+                                        (int *)mfnp, st->vma->vm_page_prot,
+                                        st->domain, cur_pages);
 
-       /* Store error code for second pass. */
-       if (st->version == 1) {
-               if (ret < 0) {
-                       /*
-                        * V1 encodes the error codes in the 32bit top nibble of the
-                        * mfn (with its known limitations vis-a-vis 64 bit callers).
-                        */
-                       *mfnp |= (ret == -ENOENT) ?
-                                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
-                                               PRIVCMD_MMAPBATCH_MFN_ERROR;
-               }
-       } else { /* st->version == 2 */
-               *((int *) mfnp) = ret;
-       }
-
-       /* And see if it affects the global_error. */
-       if (ret < 0) {
+       /* Record any failure from this batch in global_error. */
+       if (ret != nr) {
                if (ret == -ENOENT)
                        st->global_error = -ENOENT;
                else {
@@ -315,23 +333,35 @@ static int mmap_batch_fn(void *data, void *state)
                                st->global_error = 1;
                }
        }
-       st->va += PAGE_SIZE;
+       st->va += PAGE_SIZE * nr;
+       st->index += nr;
 
        return 0;
 }
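
Note the aliasing in the call above: the same buffer is passed both as the
source mfn array and, cast to (int *), as the per-frame error array, so the
error codes overwrite the frame numbers in place. This works because
sizeof(int) <= sizeof(xen_pfn_t), so err[i] never reaches into mfn[i] by the
time mfn[i] has been consumed. A hedged userspace sketch of the convention
(remap_array() is a hypothetical stand-in for xen_remap_domain_mfn_array();
the kernel builds with -fno-strict-aliasing, so compile the demo the same way):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t; /* 64-bit frame numbers, as on x86-64 */

/* Hypothetical stand-in for xen_remap_domain_mfn_array(): reads mfn[i]
 * before writing err[i], so err may alias mfn (err[i] ends at byte
 * 4*i+4 <= 8*i, the start of mfn[i], for all i >= 1).  Returns the
 * number of frames "mapped", or a negative errno. */
static int remap_array(const xen_pfn_t *mfn, int nr, int *err)
{
        int i, mapped = 0;

        for (i = 0; i < nr; i++) {
                xen_pfn_t frame = mfn[i]; /* consume before err[i] store */

                err[i] = (frame == 0) ? -ENOENT : 0; /* fake a paged-out frame */
                if (!err[i])
                        mapped++;
        }
        return mapped;
}

int main(void)
{
        xen_pfn_t frames[4] = { 0x1000, 0, 0x2000, 0x3000 };
        int *errs = (int *)frames; /* same aliasing as (int *)mfnp above */
        int nr = 4, i;
        int ret = remap_array(frames, nr, errs);

        printf("mapped %d of %d\n", ret, nr);
        for (i = 0; i < nr; i++)
                printf("err[%d] = %d\n", i, errs[i]);
        return 0;
}

As in mmap_batch_fn(), the caller then compares ret against nr to decide
whether any per-frame errors need to be reported.
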
 
-static int mmap_return_errors(void *data, void *state)
+static int mmap_return_error(int err, struct mmap_batch_state *st)
 {
-       struct mmap_batch_state *st = state;
+       int ret;
 
        if (st->version == 1) {
-               xen_pfn_t mfnp = *((xen_pfn_t *) data);
-               if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
-                       return __put_user(mfnp, st->user_mfn++);
-               else
+               if (err) {
+                       xen_pfn_t mfn;
+
+                       ret = get_user(mfn, st->user_mfn);
+                       if (ret < 0)
+                               return ret;
+                       /*
+                        * V1 encodes the error codes in the top
+                        * nibble of the 32 bit mfn (with its known
+                        * limitations vis-a-vis 64 bit callers).
+                        */
+                       mfn |= (err == -ENOENT) ?
+                               PRIVCMD_MMAPBATCH_PAGED_ERROR :
+                               PRIVCMD_MMAPBATCH_MFN_ERROR;
+                       return __put_user(mfn, st->user_mfn++);
+               } else
                        st->user_mfn++;
        } else { /* st->version == 2 */
-               int err = *((int *) data);
                if (err)
                        return __put_user(err, st->user_err++);
                else
@@ -341,6 +371,21 @@ static int mmap_return_errors(void *data, void *state)
        return 0;
 }
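
The V1 path above deserves a closer look: the error is not written to a
separate array but OR'd into the top nibble of the mfn the caller passed in,
which is why get_user() must first re-read the caller's value. A small
userspace sketch of the encode step (constants copied from
include/uapi/xen/privcmd.h; treat the exact values as an assumption to verify
against your tree):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t xen_pfn_t;

/* From include/uapi/xen/privcmd.h (verify against your tree). */
#define PRIVCMD_MMAPBATCH_MFN_ERROR   0xf0000000U
#define PRIVCMD_MMAPBATCH_PAGED_ERROR 0x80000000U

/* Fold an errno into the top nibble of a V1 mfn, mirroring what
 * mmap_return_error() does for version 1 callers.  Only 32-bit
 * mfns survive this intact, hence the "known limitations". */
static xen_pfn_t encode_v1(xen_pfn_t mfn, int err)
{
        if (!err)
                return mfn;
        return mfn | ((err == -ENOENT) ? PRIVCMD_MMAPBATCH_PAGED_ERROR
                                       : PRIVCMD_MMAPBATCH_MFN_ERROR);
}

int main(void)
{
        xen_pfn_t mfn = 0x12345;

        printf("ok:     %#llx\n", (unsigned long long)encode_v1(mfn, 0));
        printf("paged:  %#llx\n", (unsigned long long)encode_v1(mfn, -ENOENT));
        printf("error:  %#llx\n", (unsigned long long)encode_v1(mfn, -EINVAL));
        return 0;
}
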
 
+static int mmap_return_errors(void *data, int nr, void *state)
+{
+       struct mmap_batch_state *st = state;
+       int *errs = data;
+       int i;
+       int ret;
+
+       for (i = 0; i < nr; i++) {
+               ret = mmap_return_error(errs[i], st);
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
+
 /* Allocate pfns that are then mapped with gmfns from foreign domid. Update
  * the vma with the page info to use later.
  * Returns: 0 on success, otherwise -errno
@@ -472,8 +517,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        state.version       = version;
 
        /* mmap_batch_fn guarantees ret == 0 */
-       BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
-                            &pagelist, mmap_batch_fn, &state));
+       BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
+                                   &pagelist, mmap_batch_fn, &state));
 
        up_write(&mm->mmap_sem);
 
@@ -481,8 +526,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                /* Write back errors in second pass. */
                state.user_mfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
-               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                                                        &pagelist, mmap_return_errors, &state);
+               ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
+                                          &pagelist, mmap_return_errors, &state);
        } else
                ret = 0;
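
Taken together, the two traverse_pages_block() passes keep the original
two-pass structure while amortizing the per-frame overhead: the first pass
maps up to PAGE_SIZE/sizeof(xen_pfn_t) frames per callback and records
per-frame errors in place of the frame numbers (mmap_batch_fn always returns
0, which is what the BUG_ON asserts), and the second pass copies those errors
back to userspace one page-sized block at a time instead of one frame at a
time.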