projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
staging: xillybus: Fixes related to "rc" variable
[firefly-linux-kernel-4.4.55.git]
/
mm
/
vmalloc.c
diff --git
a/mm/vmalloc.c
b/mm/vmalloc.c
index f64632b671964a0788b43e8d30ae0edb7b292292..2b0aa5486092dca2745c2ec201cda44db033550c 100644
(file)
--- a/
mm/vmalloc.c
+++ b/
mm/vmalloc.c
@@
-1270,19
+1270,15
@@
void unmap_kernel_range(unsigned long addr, unsigned long size)
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
}
EXPORT_SYMBOL_GPL(unmap_kernel_range);
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
{
unsigned long addr = (unsigned long)area->addr;
unsigned long end = addr + get_vm_area_size(area);
int err;
{
unsigned long addr = (unsigned long)area->addr;
unsigned long end = addr + get_vm_area_size(area);
int err;
- err = vmap_page_range(addr, end, prot, *pages);
- if (err > 0) {
- *pages += err;
- err = 0;
- }
+ err = vmap_page_range(addr, end, prot, pages);
-	return err;
+	return err > 0 ? 0 : err;
}
EXPORT_SYMBOL_GPL(map_vm_area);
}
EXPORT_SYMBOL_GPL(map_vm_area);
@@
-1548,7
+1544,7
@@
void *vmap(struct page **pages, unsigned int count,
if (!area)
return NULL;
if (!area)
return NULL;
-	if (map_vm_area(area, prot, &pages)) {
+	if (map_vm_area(area, prot, pages)) {
vunmap(area->addr);
return NULL;
}
vunmap(area->addr);
return NULL;
}
@@
-1566,7
+1562,8
@@
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
const int order = 0;
struct page **pages;
unsigned int nr_pages, array_size, i;
const int order = 0;
struct page **pages;
unsigned int nr_pages, array_size, i;
- gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+ const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+ const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
array_size = (nr_pages * sizeof(struct page *));
@@
-1589,12
+1586,11
@@
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
for (i = 0; i < area->nr_pages; i++) {
struct page *page;
- gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
if (node == NUMA_NO_NODE)
if (node == NUMA_NO_NODE)
-			page = alloc_page(tmp_mask);
+			page = alloc_page(alloc_mask);
else
else
-			page = alloc_pages_node(node, tmp_mask, order);
+			page = alloc_pages_node(node, alloc_mask, order);
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
if (unlikely(!page)) {
/* Successfully allocated i pages, free them in __vunmap() */
@@
-1602,9
+1598,11
@@
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
goto fail;
}
area->pages[i] = page;
goto fail;
}
area->pages[i] = page;
+ if (gfp_mask & __GFP_WAIT)
+ cond_resched();
}
}
-	if (map_vm_area(area, prot, &pages))
+	if (map_vm_area(area, prot, pages))
goto fail;
return area->addr;
goto fail;
return area->addr;
@@
-2690,14
+2688,14
@@
void get_vmalloc_info(struct vmalloc_info *vmi)
prev_end = VMALLOC_START;
prev_end = VMALLOC_START;
-	spin_lock(&vmap_area_lock);
+	rcu_read_lock();
if (list_empty(&vmap_area_list)) {
vmi->largest_chunk = VMALLOC_TOTAL;
goto out;
}
if (list_empty(&vmap_area_list)) {
vmi->largest_chunk = VMALLOC_TOTAL;
goto out;
}
-	list_for_each_entry(va, &vmap_area_list, list) {
+	list_for_each_entry_rcu(va, &vmap_area_list, list) {
unsigned long addr = va->va_start;
/*
unsigned long addr = va->va_start;
/*
@@
-2724,7
+2722,7
@@
void get_vmalloc_info(struct vmalloc_info *vmi)
vmi->largest_chunk = VMALLOC_END - prev_end;
out:
vmi->largest_chunk = VMALLOC_END - prev_end;
out:
-	spin_unlock(&vmap_area_lock);
+	rcu_read_unlock();
}
#endif
}
#endif