projects
/
firefly-linux-kernel-4.4.55.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mm/migrate: correct failure handling if !hugepage_migration_support()
[firefly-linux-kernel-4.4.55.git]
/
mm
/
memory.c
diff --git a/mm/memory.c b/mm/memory.c
index 6768ce9e57d29b6d8076b11c62c2097662f334d5..86487dfa5e59bd4ecac8c09445cce49b435e8c29 100644
(file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -59,6 +59,7 @@
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/string.h>
+#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
@@ -2559,6 +2560,8 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
{
+ debug_dma_assert_idle(src);
+
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
/*
* If the source page was a PFN mapping, we don't have
* a "struct page" for it. We do a best-effort copy by
@@ -4272,11 +4275,20 @@ void copy_user_huge_page(struct page *dst, struct page *src,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
+
+static struct kmem_cache *page_ptl_cachep;
+
+void __init ptlock_cache_init(void)
+{
+ page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0,
+ SLAB_PANIC, NULL);
+}
+
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
-	ptl = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+	ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL);
if (!ptl)
return false;
page->ptl = ptl;
if (!ptl)
return false;
page->ptl = ptl;
@@ -4285,6 +4297,6 @@ bool ptlock_alloc(struct page *page)
void ptlock_free(struct page *page)
{
void ptlock_free(struct page *page)
{
-	kfree(page->ptl);
+	kmem_cache_free(page_ptl_cachep, page->ptl);
}
#endif
}
#endif