/*
 * Copyright IBM Corp. 2008
 *
 * Guest page hinting for unused pages.
 *
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>

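/* Operation codes for the ESSA (Extract and Set Storage Attributes) instruction */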
#define ESSA_SET_STABLE		1
#define ESSA_SET_UNUSED		2

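/* CMMA (Collaborative Memory Management Assist) is enabled by default. */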
static int cmma_flag = 1;

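/*
 * Parse the "cmma=" kernel parameter: "yes"/"on" enable page hinting,
 * "no"/"off" disable it, anything else is rejected.
 */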
static int __init cmma(char *str)
{
	char *parm;

	parm = strstrip(str);
	if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
		cmma_flag = 1;
		return 1;
	}
	cmma_flag = 0;
	if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
		return 1;
	return 0;
}
__setup("cmma=", cmma);

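/*
 * Probe for ESSA by executing it once. If the instruction is not
 * available, the resulting exception is caught by the EX_TABLE entry,
 * rc keeps its -EOPNOTSUPP value and CMMA gets disabled.
 */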
void __init cmma_init(void)
{
	register unsigned long tmp asm("0") = 0;
	register int rc asm("1") = -EOPNOTSUPP;

	if (!cmma_flag)
		return;
	asm volatile(
		"	.insn rrf,0xb9ab0000,%1,%1,0,0\n"
		"0:	la	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+&d" (rc), "+&d" (tmp));
	if (rc)
		cmma_flag = 0;
}

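/*
 * Issue ESSA with ESSA_SET_UNUSED for each of the 2^order page frames,
 * telling the host that their contents may be discarded.
 */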
static inline void set_page_unstable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_UNUSED));
}

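/* Page allocator hook: hint freed pages as unused to the host. */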
void arch_free_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_unstable(page, order);
}

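/* Issue ESSA with ESSA_SET_STABLE to mark each of the 2^order page frames as in use. */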
static inline void set_page_stable(struct page *page, int order)
{
	int i, rc;

	for (i = 0; i < (1 << order); i++)
		asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
			     : "=&d" (rc)
			     : "a" (page_to_phys(page + i)),
			       "i" (ESSA_SET_STABLE));
}

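/* Page allocator hook: mark allocated pages stable before they are used. */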
void arch_alloc_page(struct page *page, int order)
{
	if (!cmma_flag)
		return;
	set_page_stable(page, order);
}

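/*
 * Walk the free lists of all populated zones and set every free page
 * either stable or unused, e.g. to make all pages stable before a
 * hibernation image is written. When making pages stable, the per-cpu
 * page lists are drained first so all free pages sit on the zone lists.
 */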
void arch_set_page_states(int make_stable)
{
	unsigned long flags, order, t;
	struct list_head *l;
	struct page *page;
	struct zone *zone;

	if (!cmma_flag)
		return;
	if (make_stable)
		drain_local_pages(NULL);
	for_each_populated_zone(zone) {
		spin_lock_irqsave(&zone->lock, flags);
		for_each_migratetype_order(order, t) {
			list_for_each(l, &zone->free_area[order].free_list[t]) {
				page = list_entry(l, struct page, lru);
				if (make_stable)
					set_page_stable(page, order);
				else
					set_page_unstable(page, order);
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
}