-rw-r--r--  drivers/block/zram/zram_drv.c |  1
-rw-r--r--  include/linux/blkdev.h        |  2
-rw-r--r--  include/linux/swap.h          | 20
-rw-r--r--  mm/memory.c                   |  3
-rw-r--r--  mm/swapfile.c                 | 33
-rw-r--r--  mm/vmscan.c                   |  2
6 files changed, 52 insertions, 9 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 803e7a2..7040165 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -936,6 +936,7 @@ static int create_device(struct zram *zram, int device_id)
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
+ __set_bit(QUEUE_FLAG_FAST, &zram->queue->queue_flags);
/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
set_capacity(zram->disk, 0);
/* zram devices sort of resembles non-rotational disks */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b13021..af75d16 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -403,6 +403,7 @@ struct request_queue
#define QUEUE_FLAG_NOXMERGES 15 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
+#define QUEUE_FLAG_FAST 20 /* fast block device (e.g. ram based) */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -487,6 +488,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_fast(q) test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
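
The blkdev.h hunks above add a queue flag with which a driver can advertise a seek-free, low-latency backing store, plus a blk_queue_fast() predicate for callers. A minimal sketch of the intended usage, mirroring the zram hunk earlier in this patch; the driver and function names are illustrative, not part of the patch:

	#include <linux/blkdev.h>

	/* Driver side: mark the request queue as "fast" (e.g. RAM backed),
	 * the same way the zram hunk above does. */
	static void mydrv_mark_queue_fast(struct request_queue *q)
	{
		__set_bit(QUEUE_FLAG_FAST, &q->queue_flags);
	}

	/* Caller side: test the flag through the new helper. */
	static bool mydrv_backing_is_fast(struct block_device *bdev)
	{
		return blk_queue_fast(bdev_get_queue(bdev));
	}
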
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e73799d..be5eecc 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,6 +150,7 @@ enum {
SWP_BLKDEV = (1 << 6), /* its a block device */
/* add others here before... */
SWP_SCANNING = (1 << 8), /* refcount in scan_swap_map */
+ SWP_FAST = (1 << 9), /* blkdev access is fast and cheap */
};
#define SWAP_CLUSTER_MAX 32
@@ -201,9 +202,6 @@ struct swap_list_t {
int next; /* swapfile to be used next */
};
-/* Swap 50% full? Release swapcache more aggressively.. */
-#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
-
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
@@ -320,6 +318,21 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
/* linux/mm/swapfile.c */
extern long nr_swap_pages;
extern long total_swap_pages;
+extern bool is_swap_fast(swp_entry_t entry);
+
+/* Swap 50% full? Release swapcache more aggressively.. */
+static inline bool vm_swap_full(struct swap_info_struct *si)
+{
+ /*
+ * If the swap device is fast, return true
+ * not to delay swap free.
+ */
+ if (si->flags & SWP_FAST)
+ return true;
+
+ return nr_swap_pages*2 < total_swap_pages;
+}
+
extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(void);
extern swp_entry_t get_swap_page_of_type(int);
@@ -379,6 +392,7 @@ static inline void mem_cgroup_uncharge_swap(swp_entry_t ent)
#define nr_swap_pages 0L
#define total_swap_pages 0L
#define total_swapcache_pages 0UL
+#define vm_swap_full(si) 0
#define si_swapinfo(val) \
do { (val)->freeswap = (val)->totalswap = 0; } while (0)
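
With this change vm_swap_full() stops being a global watermark check and becomes a per-device decision, so callers must pass the swap_info_struct of the device backing the page. page_swap_info() derives that from the page's swap entry, which is only valid while the page sits in the swap cache, so callers that cannot guarantee this need a PageSwapCache() guard, as the mm/memory.c hunk below adds. A hedged sketch of the converted call pattern (the wrapper function is illustrative):

	/* Free the swap slot eagerly when it is cheap to re-create (SWP_FAST
	 * device) or when swap space is more than half used. */
	static void maybe_free_swap(struct page *page)
	{
		if (PageSwapCache(page) &&
		    vm_swap_full(page_swap_info(page)))
			try_to_free_swap(page);
	}
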
diff --git a/mm/memory.c b/mm/memory.c
index 5331e67..067a4f3 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3140,7 +3140,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
- if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
+ (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (swapcache) {
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c8f4338..7197864 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -69,6 +69,26 @@ static inline unsigned char swap_count(unsigned char ent)
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
+bool is_swap_fast(swp_entry_t entry)
+{
+ struct swap_info_struct *p;
+ unsigned long type;
+
+ if (non_swap_entry(entry))
+ return false;
+
+ type = swp_type(entry);
+ if (type >= nr_swapfiles)
+ return false;
+
+ p = swap_info[type];
+
+ if (p->flags & SWP_FAST)
+ return true;
+
+ return false;
+}
+
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
@@ -289,7 +309,7 @@ checks:
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -378,7 +398,8 @@ scan:
spin_lock(&swap_lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
@@ -393,7 +414,8 @@ scan:
spin_lock(&swap_lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
@@ -708,7 +730,8 @@ int free_swap_and_cache(swp_entry_t entry)
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
- (!page_mapped(page) || vm_swap_full())) {
+ (!page_mapped(page) ||
+ vm_swap_full(page_swap_info(page)))) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
@@ -2114,6 +2137,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
p->flags |= SWP_DISCARDABLE;
+ if (blk_queue_fast(bdev_get_queue(p->bdev)))
+ p->flags |= SWP_FAST;
}
mutex_lock(&swapon_mutex);
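
Note that is_swap_fast() is added and exported above but has no caller anywhere in this diff; it simply resolves a swap entry to its swap_info_struct and reports whether the device was flagged SWP_FAST at swapon time. One plausible follow-up use, shown here only as a hypothetical sketch, is to skip swap readahead when the backing device has no seek cost:

	/* Hypothetical caller: readahead buys nothing on a seek-free device,
	 * so read just the faulting page when the entry's device is fast.
	 * Whether a later patch actually does this is an assumption. */
	if (is_swap_fast(entry))
		page = read_swap_cache_async(entry, gfp_mask, vma, addr);
	else
		page = swapin_readahead(entry, gfp_mask, vma, addr);
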
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 08f11e2..9b72c26 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -985,7 +985,7 @@ cull_mlocked:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
- if (PageSwapCache(page) && vm_swap_full())
+ if (PageSwapCache(page) && vm_swap_full(page_swap_info(page)))
try_to_free_swap(page);
VM_BUG_ON(PageActive(page));
SetPageActive(page);