![[LWN Logo]](/images/lcorner.png) |
|
![[LWN.net]](/images/Included.png) |
From: Linus Torvalds <torvalds@transmeta.com>
To: Ben LaHaise <bcrl@redhat.com>
Subject: Re: [RFC][DATA] re "ongoing vm suckage"
Date: Fri, 3 Aug 2001 20:48:03 -0700 (PDT)
Cc: Daniel Phillips <phillips@bonn-fries.net>,
Rik van Riel <riel@conectiva.com.br>,
<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>
You might try this approach instead, which just removes the thing that
might deadlock and always is unfair..
(Ugh, I hate attachments, but the system I'm sending this from has this
broken version of 'pine' that will mess up white-space).
For nicer interactive behaviour while flushing things out, the
inode_fsync() thing should really use "write_locked_buffers()". That's a
separate patch, though.
Linus
diff -u --recursive --new-file pre3/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- pre3/linux/drivers/block/ll_rw_blk.c Thu Jul 19 20:51:23 2001
+++ linux/drivers/block/ll_rw_blk.c Fri Aug 3 20:28:43 2001
@@ -119,17 +119,10 @@
int * max_sectors[MAX_BLKDEV];
/*
- * queued sectors for all devices, used to make sure we don't fill all
- * of memory with locked buffers
+ * How many requests do we allocate per queue,
+ * and how many do we "batch" on freeing them?
*/
-atomic_t queued_sectors;
-
-/*
- * high and low watermark for above
- */
-static int high_queued_sectors, low_queued_sectors;
-static int batch_requests, queue_nr_requests;
-static DECLARE_WAIT_QUEUE_HEAD(blk_buffers_wait);
+static int queue_nr_requests, batch_requests;
static inline int get_max_sectors(kdev_t dev)
{
@@ -592,13 +585,6 @@
*/
if (q) {
/*
- * we've released enough buffers to start I/O again
- */
- if (waitqueue_active(&blk_buffers_wait)
- && atomic_read(&queued_sectors) < low_queued_sectors)
- wake_up(&blk_buffers_wait);
-
- /*
* Add to pending free list and batch wakeups
*/
list_add(&req->table, &q->pending_freelist[rw]);
@@ -1032,16 +1018,6 @@
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
- /*
- * don't lock any more buffers if we are above the high
- * water mark. instead start I/O on the queued stuff.
- */
- if (atomic_read(&queued_sectors) >= high_queued_sectors) {
- run_task_queue(&tq_disk);
- wait_event(blk_buffers_wait,
- atomic_read(&queued_sectors) < low_queued_sectors);
- }
-
/* Only one thread can actually submit the I/O. */
if (test_and_set_bit(BH_Lock, &bh->b_state))
continue;
@@ -1168,26 +1144,9 @@
memset(max_readahead, 0, sizeof(max_readahead));
memset(max_sectors, 0, sizeof(max_sectors));
- atomic_set(&queued_sectors, 0);
total_ram = nr_free_pages() << (PAGE_SHIFT - 10);
/*
- * Try to keep 128MB max hysteris. If not possible,
- * use half of RAM
- */
- high_queued_sectors = (total_ram * 2) / 3;
- low_queued_sectors = high_queued_sectors / 3;
- if (high_queued_sectors - low_queued_sectors > MB(128))
- low_queued_sectors = high_queued_sectors - MB(128);
-
-
- /*
- * make it sectors (512b)
- */
- high_queued_sectors <<= 1;
- low_queued_sectors <<= 1;
-
- /*
* Scale free request slots per queue too
*/
total_ram = (total_ram + MB(32) - 1) & ~(MB(32) - 1);
@@ -1200,10 +1159,7 @@
if ((batch_requests = queue_nr_requests >> 3) > 32)
batch_requests = 32;
- printk("block: queued sectors max/low %dkB/%dkB, %d slots per queue\n",
- high_queued_sectors / 2,
- low_queued_sectors / 2,
- queue_nr_requests);
+ printk("block: %d slots per queue, batch=%d\n", queue_nr_requests, batch_requests);
#ifdef CONFIG_AMIGA_Z2RAM
z2_init();
@@ -1324,4 +1280,3 @@
EXPORT_SYMBOL(generic_make_request);
EXPORT_SYMBOL(blkdev_release_request);
EXPORT_SYMBOL(generic_unplug_device);
-EXPORT_SYMBOL(queued_sectors);
diff -u --recursive --new-file pre3/linux/include/linux/blkdev.h linux/include/linux/blkdev.h
--- pre3/linux/include/linux/blkdev.h Mon Jul 30 10:45:59 2001
+++ linux/include/linux/blkdev.h Fri Aug 3 20:30:01 2001
@@ -174,8 +174,6 @@
extern int * max_segments[MAX_BLKDEV];
-extern atomic_t queued_sectors;
-
#define MAX_SEGMENTS 128
#define MAX_SECTORS 255
@@ -203,14 +201,7 @@
return 512;
}
-#define blk_finished_io(nsects) \
- atomic_sub(nsects, &queued_sectors); \
- if (atomic_read(&queued_sectors) < 0) { \
- printk("block: queued_sectors < 0\n"); \
- atomic_set(&queued_sectors, 0); \
- }
-
-#define blk_started_io(nsects) \
- atomic_add(nsects, &queued_sectors);
+#define blk_finished_io(nsects) do { } while (0)
+#define blk_started_io(nsects) do { } while (0)
#endif