#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3
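
/*
 * Illustrative sketch only, not kernel code: a direct-compaction caller is
 * expected to treat these codes roughly as follows (the variables are the
 * caller's own):
 *
 *	switch (try_to_compact_pages(zonelist, order, gfp_mask, nodemask)) {
 *	case COMPACT_SKIPPED:
 *		...fall back to direct reclaim...
 *		break;
 *	case COMPACT_PARTIAL:
 *		...retry the allocation, suitable free pages may now exist...
 *		break;
 *	}
 */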

#ifdef CONFIG_COMPACTION
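/*
 * Backs /proc/sys/vm/compact_memory: writing to it triggers compaction of
 * all zones on all nodes.
 */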
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
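/*
 * Backs /proc/sys/vm/extfrag_threshold: direct compaction is skipped for a
 * zone whose fragmentation index is at or below this value, since failures
 * there are more likely caused by lack of memory than by fragmentation.
 */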
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

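/*
 * fragmentation_index() reports, for a given order, why allocations of that
 * order are likely to fail in the zone: values towards 1000 point at
 * external fragmentation (compaction can help), values towards 0 point at
 * a lack of free memory (reclaim is more appropriate).
 */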
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
			gfp_t gfp_mask);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. The next 1 << compact_defer_shift compactions are
 * skipped, up to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < (1UL << zone->compact_defer_shift);
}
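
/*
 * Sketch of how an allocator slow path might tie the helpers above together.
 * Illustrative only: "zone", "order" and "gfp_mask" belong to the caller, and
 * resetting the deferral counters directly is an assumption, not part of this
 * header's API.
 *
 *	if (!compaction_deferred(zone)) {
 *		if (compact_zone_order(zone, order, gfp_mask) == COMPACT_PARTIAL) {
 *			zone->compact_considered = 0;
 *			zone->compact_defer_shift = 0;
 *		} else {
 *			defer_compaction(zone);
 *		}
 *	}
 */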

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline unsigned long compact_zone_order(struct zone *zone, int order,
			gfp_t gfp_mask)
{
	return 0;
}

static inline void defer_compaction(struct zone *zone)
{
}

static inline bool compaction_deferred(struct zone *zone)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
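/*
 * Per-node sysfs trigger: writing to /sys/devices/system/node/node<N>/compact
 * compacts all zones of that node.
 */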
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */