On 6/22/19 2:20 AM, Yang Shi wrote:
@@ -969,10 +975,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
/*
* page migration, thp tail pages can be passed.
*/
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
struct page *head = compound_head(page);
+
+ /*
+ * Non-movable page may reach here. And, there may be
+ * temporarily off-LRU pages or non-LRU movable pages.
+ * Treat them as unmovable pages since they can't be
+ * isolated, so they can't be moved at the moment. It
+ * should return -EIO for this case too.
+ */
+ if (!PageLRU(head) && (flags & MPOL_MF_STRICT))
+ return -EIO;
+

Hm but !PageLRU() is not the only way why queueing for migration can
fail, as can be seen from the rest of the function. Shouldn't all cases
be reported?
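
Just to illustrate the question (untested, rough sketch only, not a
proposal): reporting every queueing failure under MPOL_MF_STRICT could
look roughly like the below. Whether -EIO is the right code for the
shared-page and isolation-failure cases, and where exactly the checks
should sit, is left open here.

static int migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	struct page *head = compound_head(page);

	/*
	 * Avoid migrating a page that is shared with others.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
		if (!isolate_lru_page(head)) {
			list_add_tail(&head->lru, pagelist);
			mod_node_page_state(page_pgdat(head),
				NR_ISOLATED_ANON + page_is_file_cache(head),
				hpage_nr_pages(head));
		} else if (flags & MPOL_MF_STRICT) {
			/* isolation failed, e.g. page temporarily off the LRU */
			return -EIO;
		}
	} else if (flags & MPOL_MF_STRICT) {
		/* shared page and MPOL_MF_MOVE_ALL not requested */
		return -EIO;
	}

	return 0;
}
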
/*
* Avoid migrating a page that is shared with others.
*/
@@ -984,6 +1001,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
hpage_nr_pages(head));
}
}
+
+ return 0;
}
/* page allocation callback for NUMA node migration */
@@ -1186,9 +1205,10 @@ static struct page *new_page(struct page *page, unsigned long start)
}
#else
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
+ return -EIO;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,