diff -ru --new-file /usr/src/linux/MAGIC linux/MAGIC
--- /usr/src/linux/MAGIC	Tue Jan 10 09:07:09 1995
+++ linux/MAGIC	Fri Jun 30 18:50:17 1995
@@ -52,6 +52,7 @@
 0x00	fd.h
 0x03	hdreg.h
 0x06	lp.h
+0x09	md.h
 0x12	fs.h
 'C'	soundcard.h
 'K'	kd.h
diff -ru --new-file /usr/src/linux/arch/i386/config.in linux/arch/i386/config.in
--- /usr/src/linux/arch/i386/config.in	Fri Jun 30 20:01:19 1995
+++ linux/arch/i386/config.in	Fri Jun 30 18:50:17 1995
@@ -22,6 +22,13 @@
 fi
 
 bool 'XT harddisk support' CONFIG_BLK_DEV_XD n
+bool 'Multiple devices driver support' CONFIG_BLK_DEV_MD n
+if [ "$CONFIG_BLK_DEV_MD" = "y" ]; then
+ bool '   Linear (append) mode' CONFIG_MD_LINEAR n
+ bool '   RAID-0 (striping) mode' CONFIG_MD_STRIPED y
+ bool '   RAID-1 mode (very ALPHA)' CONFIG_MD_RAID1 n
+# bool '   RAID-5 mode (NOT usable - debug only ;-)' CONFIG_MD_RAID5 n
+fi
 bool 'Networking support' CONFIG_NET y
 bool 'Limit memory to low 16MB' CONFIG_MAX_16M n
 bool 'PCI bios support' CONFIG_PCI y
diff -ru --new-file /usr/src/linux/drivers/block/Makefile linux/drivers/block/Makefile
--- /usr/src/linux/drivers/block/Makefile	Fri Jun 30 12:05:02 1995
+++ linux/drivers/block/Makefile	Fri Jun 30 18:50:17 1995
@@ -24,6 +24,7 @@
 OBJS := ll_rw_blk.o ramdisk.o genhd.o 
 SRCS := ll_rw_blk.c ramdisk.c genhd.c 
 BLOCK_MODULE_OBJS =
+SYMTAB_OBJS =
 
 ifdef CONFIG_BLK_DEV_FD
 OBJS := $(OBJS) floppy.o
@@ -93,11 +94,47 @@
 BLOCK_MODULE_OBJS := $(BLOCK_MODULE_OBJS) sonycd535.o
 endif
 
+ifdef CONFIG_BLK_DEV_MD
+SYMTAB_OBJS :=  $(SYMTAB_OBJS) md.o
+SRCS := $(SRCS) md.c
+
+ifdef CONFIG_MD_LINEAR
+OBJS := $(OBJS) linear.o
+SRCS := $(SRCS) linear.c
+else
+BLOCK_MODULE_OBJS := $(BLOCK_MODULE_OBJS) linear.o
+endif
+
+ifdef CONFIG_MD_STRIPED
+OBJS := $(OBJS) raid0.o
+SRCS := $(SRCS) raid0.c
+else
+BLOCK_MODULE_OBJS := $(BLOCK_MODULE_OBJS) raid0.o
+endif
+
+ifdef CONFIG_MD_RAID1
+OBJS := $(OBJS) raid1.o
+SRCS := $(SRCS) raid1.c
+else
+BLOCK_MODULE_OBJS := $(BLOCK_MODULE_OBJS) raid1.o
+endif
+
+ifdef CONFIG_MD_RAID5
+OBJS := $(OBJS) raid5.o
+SRCS := $(SRCS) raid5.c
+else
+BLOCK_MODULE_OBJS := $(BLOCK_MODULE_OBJS) raid5.o
+endif
+
+endif
+
 all: block.a
 
-block.a: $(OBJS)
+include ../../versions.mk
+
+block.a: $(SYMTAB_OBJS) $(OBJS)
 	rm -f block.a
-	$(AR) rcs block.a $(OBJS)
+	$(AR) rcs block.a $(SYMTAB_OBJS) $(OBJS)
 	sync
 
 dep:
diff -ru --new-file /usr/src/linux/drivers/block/blk.h linux/drivers/block/blk.h
--- /usr/src/linux/drivers/block/blk.h	Fri Jun 30 12:05:02 1995
+++ linux/drivers/block/blk.h	Fri Jun 30 18:50:17 1995
@@ -26,7 +26,7 @@
  * These will have to be changed to be aware of different buffer
  * sizes etc.. It actually needs a major cleanup.
  */
-#ifdef IDE_DRIVER
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
 #define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
 #else
 #define SECTOR_MASK (blksize_size[MAJOR_NR] &&     \
@@ -54,6 +54,9 @@
 #ifdef CONFIG_SBPCD
 extern unsigned long sbpcd_init(unsigned long, unsigned long);
 #endif CONFIG_SBPCD
+#ifdef CONFIG_BLK_DEV_MD
+extern unsigned long md_init(unsigned long mem_start, unsigned long mem_end);
+#endif
 extern void set_device_ro(int dev,int flag);
 
 extern int floppy_init(void);
@@ -123,6 +126,19 @@
 #define DEVICE_ON(device)
 #define DEVICE_OFF(device)
 
+/* Kludge to use the same number for both char and block major numbers */
+#elif  (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#ifndef MD_PERSONALITY
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif
+
 #elif (MAJOR_NR == SCSI_TAPE_MAJOR)
 
 #define DEVICE_NAME "scsitape"
@@ -216,7 +232,7 @@
 
 #endif /* MAJOR_NR == whatever */
 
-#if (MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER)
+#if ((MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) && !defined(MD_DRIVER))
 
 #ifndef CURRENT
 #define CURRENT (blk_dev[MAJOR_NR].current_request)
@@ -248,8 +264,10 @@
 
 #endif /* DEVICE_TIMEOUT */
 
+#ifndef MD_PERSONALITY
 static void (DEVICE_REQUEST)(void);
-
+#endif
+  
 #ifdef DEVICE_INTR
 #define CLEAR_INTR SET_INTR(NULL)
 #else
@@ -268,15 +286,18 @@
 			panic(DEVICE_NAME ": block not locked"); \
 	}
 
-#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) */
+#endif /* ((MAJOR_NR != SCSI_TAPE_MAJOR) && !defined(IDE_DRIVER) && !defined(MD_DRIVER)) */
 
 /* end_request() - SCSI devices have their own version */
 
-#if ! SCSI_MAJOR(MAJOR_NR)
+#if ! SCSI_MAJOR(MAJOR_NR) || (defined(MD_DRIVER) && !defined(MD_PERSONALITY))
 
 #ifdef IDE_DRIVER
 static void end_request(byte uptodate, byte hwif) {
 	struct request *req = ide_cur_rq[HWIF];
+#elif defined(MD_DRIVER)
+static void end_request (int uptodate) {
+        struct request *req = md_cur_req;
 #else
 static void end_request(int uptodate) {
 	struct request *req = CURRENT;
@@ -295,9 +316,17 @@
 
 	if ((bh = req->bh) != NULL) {
 		req->bh = bh->b_reqnext;
-		bh->b_reqnext = NULL;
+		/* Only remove from the buffer list on the last access
+		   to this buffer, since raid-1 does 2 write
+		   accesses for a single buffer */
+		if (bh->b_lock==1)
+		        bh->b_reqnext = NULL;
 		bh->b_uptodate = uptodate;		
-		if (!uptodate) bh->b_req = 0; /* So no "Weird" errors */
+		if (!uptodate)
+		{
+		  bh->b_req = 0; /* So no "Weird" errors */
+		  bh->b_rw |= 0x80;
+		}
 		unlock_buffer(bh);
 		if ((bh = req->bh) != NULL) {
 			req->current_nr_sectors = bh->b_size >> 9;
@@ -311,6 +340,8 @@
 	}
 #ifdef IDE_DRIVER
 	ide_cur_rq[HWIF] = NULL;
+#elif defined(MD_DRIVER)
+	md_cur_req = NULL;
 #else
 	DEVICE_OFF(req->dev);
 	CURRENT = req->next;
@@ -320,7 +351,42 @@
 	req->dev = -1;
 	wake_up(&wait_for_request);
 }
-#endif /* ! SCSI_MAJOR(MAJOR_NR) */
+#endif /* ! SCSI_MAJOR(MAJOR_NR) || defined(MD_DRIVER) */
+
+#ifdef MD_PERSONALITY
+extern inline void end_redirect (void)
+{
+  struct request *req = md_cur_req;
+  struct buffer_head * bh;
+
+  req->errors = 0;
+  
+  if ((bh = req->bh) != NULL)
+  {
+    req->bh = bh->b_reqnext;
+    bh->b_reqnext = NULL;
+    
+    if ((bh = req->bh) != NULL)
+    {
+      req->sector += req->current_nr_sectors;
+      req->current_nr_sectors = bh->b_size >> 9;
+      
+      if (req->nr_sectors < req->current_nr_sectors)
+      {
+	req->nr_sectors = req->current_nr_sectors;
+	printk("end_redirect : buffer-list destroyed\n");
+      }
+      
+      req->buffer = bh->b_data;
+      return;
+    }
+  }
+
+  md_cur_req=NULL;
+  req->dev = -1;
+  wake_up(&wait_for_request);
+}
+#endif /* MD_PERSONALITY */
 
 #endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
 
diff -ru --new-file /usr/src/linux/drivers/block/linear.c linux/drivers/block/linear.c
--- /usr/src/linux/drivers/block/linear.c
+++ linux/drivers/block/linear.c	Fri Jun 30 18:50:18 1995
@@ -0,0 +1,225 @@
+
+/*
+   linear.c : Multiple Devices driver for Linux
+              Copyright (C) 1994, 1995 Marc ZYNGIER
+	      <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	      <maz@gloups.fdn.fr>
+
+   Linear mode management functions.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#include <linux/md.h>
+#include <linux/linear.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include "blk.h"
+
+static int linear_run (int minor, struct md_dev *mddev)
+{
+  int current=0, i, size, dev0_size, nb_zone;
+  struct linear_data *data;
+
+  MOD_INC_USE_COUNT;
+  
+  mddev->private=kmalloc (sizeof (struct linear_data), GFP_KERNEL);
+  data=(struct linear_data *) mddev->private;
+
+  /*
+     Find out the smallest device. This was previously done
+     at registry time, but since it violates modularity,
+     I moved it here... Any comment ? ;-)
+   */
+
+  data->smallest=devices[minor];
+  for (i=1; i<mddev->nb_dev; i++)
+    if (data->smallest->size > devices[minor][i].size)
+      data->smallest=devices[minor]+i;
+  
+  nb_zone=data->nr_zones=
+    md_size[minor]/data->smallest->size +
+    (md_size[minor]%data->smallest->size ? 1 : 0);
+  
+  data->hash_table=kmalloc (sizeof (struct linear_hash)*nb_zone, GFP_KERNEL);
+
+  size=devices[minor][current].size;
+
+  i=0;
+  while (current<mddev->nb_dev)
+  {
+    data->hash_table[i].dev0=devices[minor]+current;
+
+    if (size>=data->smallest->size) /* If we completely fill the slot */
+    {
+      data->hash_table[i++].dev1=NULL;
+      size-=data->smallest->size;
+
+      if (!size)
+      {
+	if (++current==mddev->nb_dev) continue;
+	size=devices[minor][current].size;
+      }
+
+      continue;
+    }
+
+    if (++current==mddev->nb_dev) /* Last dev, set dev1 as NULL */
+    {
+      data->hash_table[i].dev1=NULL;
+      continue;
+    }
+
+    dev0_size=size;		/* Here, we use a 2nd dev to fill the slot */
+    size=devices[minor][current].size;
+    data->hash_table[i++].dev1=devices[minor]+current;
+    size-=(data->smallest->size - dev0_size);
+  }
+
+  return 0;
+}
+
+static int linear_stop (int minor, struct md_dev *mddev)
+{
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  
+  kfree (data->hash_table);
+  kfree (data);
+
+  MOD_DEC_USE_COUNT;
+
+  return 0;
+}
+
+
+static int linear_map (int minor, struct md_dev *mddev, struct request *req)
+{
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  struct linear_hash *hash;
+  struct real_dev *tmp_dev;
+  long block, rblock;
+  struct buffer_head *bh;
+  int rw;
+
+  block=req->sector >> 1;
+  hash=data->hash_table+(block/data->smallest->size);
+  
+  if (block >= (hash->dev0->size + hash->dev0->offset))
+  {
+    if (!hash->dev1)
+      printk ("linear_map : hash->dev1==NULL for block %ld\n", block);
+    tmp_dev=hash->dev1;
+  }
+  else
+    tmp_dev=hash->dev0;
+  
+  if (block >= (tmp_dev->size + tmp_dev->offset) || block < tmp_dev->offset)
+    printk ("Block %ld out of bounds on dev %04x size %d offset %d\n", block, tmp_dev->dev, tmp_dev->size, tmp_dev->offset);
+  
+  rblock=(block-(tmp_dev->offset));
+  
+  if ((bh=req->bh))		/* This a buffer request */
+  {
+    if (req->cmd==WRITE)
+      mark_buffer_dirty (bh, 0);/* re-dirty the buffer! */
+
+    rw=req->cmd;
+    end_redirect ();		/* Must be done BEFORE redirection ! */
+    bh->b_rdev=tmp_dev->dev;
+    make_request (tmp_dev->dev, rblock*(bh->b_size>>9), rw, bh);
+  }
+  else				/* This is a paging request */
+  {
+    md_cur_req=NULL;
+    req->dev=tmp_dev->dev;
+    req->sector=rblock << 1;
+    add_request (blk_dev+MAJOR (tmp_dev->dev), req);
+  }
+
+  return 0;
+}
+
+
+static int linear_status (char *page, int minor, struct md_dev *mddev)
+{
+  int sz=0;
+
+#undef MD_DEBUG
+#ifdef MD_DEBUG
+  int j;
+  struct linear_data *data=(struct linear_data *) mddev->private;
+  
+  sz+=sprintf (page+sz, "      ");
+  for (j=0; j<data->nr_zones; j++)
+  {
+    sz+=sprintf (page+sz, "[%s",
+		 partition_name (data->hash_table[j].dev0->dev));
+
+    if (data->hash_table[j].dev1)
+      sz+=sprintf (page+sz, "/%s] ",
+		   partition_name(data->hash_table[j].dev1->dev));
+    else
+      sz+=sprintf (page+sz, "] ");
+  }
+
+  sz+=sprintf (page+sz, "\n");
+#endif
+  return sz;
+}
+
+
+static struct md_personality linear_personality=
+{
+  "linear",
+  linear_map,
+  linear_run,
+  linear_stop,
+  linear_status,
+  NULL,				/* no ioctls */
+  0
+};
+
+
+#ifndef MODULE
+
+void linear_init (void)
+{
+  register_md_personality (LINEAR, &linear_personality);
+}
+
+#else
+
+char kernel_version[]= UTS_RELEASE;
+
+int init_module (void)
+{
+  return (register_md_personality (LINEAR, &linear_personality));
+}
+
+void cleanup_module (void)
+{
+  if (MOD_IN_USE)
+    printk ("md linear : module still busy...\n");
+  else
+    unregister_md_personality (LINEAR);
+}
+
+#endif
diff -ru --new-file /usr/src/linux/drivers/block/ll_rw_blk.c linux/drivers/block/ll_rw_blk.c
--- /usr/src/linux/drivers/block/ll_rw_blk.c	Fri Jun 30 12:05:03 1995
+++ linux/drivers/block/ll_rw_blk.c	Fri Jun 30 18:50:18 1995
@@ -233,7 +233,7 @@
  * It disables interrupts so that it can muck with the
  * request-lists in peace.
  */
-static void add_request(struct blk_dev_struct * dev, struct request * req)
+void add_request(struct blk_dev_struct * dev, struct request * req)
 {
 	struct request * tmp;
 	short		 disk_index;
@@ -271,25 +271,32 @@
 	req->next = tmp->next;
 	tmp->next = req;
 
-/* for SCSI devices, call request_fn unconditionally */
-	if (scsi_major(MAJOR(req->dev)))
+/* for SCSI devices, call request_fn unconditionally
+   (MD_MAJOR conflicts with SCSI_TAPE_MAJOR...) */
+	if (scsi_major(MAJOR(req->dev)) && MAJOR(req->dev)!=MD_MAJOR)
 		(dev->request_fn)();
 
 	sti();
 }
 
-static void make_request(int major,int rw, struct buffer_head * bh)
+void make_request(dev_t dev, unsigned int sector,
+		  int rw, struct buffer_head * bh)
 {
-	unsigned int sector, count;
+	unsigned int count;
 	struct request * req;
 	int rw_ahead, max_req;
+	int major=MAJOR(dev);
 
+	
 /* WRITEA/READA is special case - it is not really needed, so if the */
 /* buffer is locked, we just forget about it, else it's a normal read */
 	rw_ahead = (rw == READA || rw == WRITEA);
 	if (rw_ahead) {
-		if (bh->b_lock)
+		if (bh->b_lock && bh->b_dev==bh->b_rdev)
+		{
+		  printk ("Blast #1\n");
 			return;
+		}
 		if (rw == READA)
 			rw = READ;
 		else
@@ -300,23 +307,29 @@
 		return;
 	}
 	count = bh->b_size >> 9;
-	sector = bh->b_blocknr * count;
 	if (blk_size[major])
-		if (blk_size[major][MINOR(bh->b_dev)] < (sector + count)>>1) {
+		if (blk_size[major][MINOR(dev)] < (sector + count)>>1) {
 			bh->b_dirt = bh->b_uptodate = 0;
 			bh->b_req = 0;
 			return;
 		}
 	/* Uhhuh.. Nasty dead-lock possible here.. */
-	if (bh->b_lock)
+	if (bh->b_lock && bh->b_dev==bh->b_rdev)
+	{
+	  printk ("Blast #2\n");
 		return;
+	}
 	/* Maybe the above fixes it, and maybe it doesn't boot. Life is interesting */
-	lock_buffer(bh);
+	if (bh->b_dev==bh->b_rdev)
+	  lock_buffer(bh);
 	if ((rw == WRITE && !bh->b_dirt) || (rw == READ && bh->b_uptodate)) {
 		unlock_buffer(bh);
 		return;
 	}
 
+	bh->b_reqshared=NULL;
+	bh->b_sister_req=NULL;
+
 /* we don't allow the write-requests to fill up the queue completely:
  * we want some room for reads: they take precedence. The last third
  * of the requests are only for reads.
@@ -326,12 +339,14 @@
 /* look for a free request. */
 	cli();
 
-/* The scsi disk drivers and the IDE driver completely remove the request
- * from the queue when they start processing an entry.  For this reason
- * it is safe to continue to add links to the top entry for those devices.
+/* The scsi disk drivers, the IDE driver and the MD driver completely
+ * remove the request from the queue when they start processing an
+ * entry.  For this reason it is safe to continue to add links to the
+ * top entry for those devices.
  */
 	if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
 	     || major == IDE1_MAJOR
+	     || major == MD_MAJOR
 	     || major == FLOPPY_MAJOR
 	     || major == SCSI_DISK_MAJOR
 	     || major == SCSI_CDROM_MAJOR)
@@ -344,7 +359,7 @@
 #endif CONFIG_BLK_DEV_HD
 			req = req->next;
 		while (req) {
-			if (req->dev == bh->b_dev &&
+			if (req->dev == dev &&
 			    !req->sem &&
 			    req->cmd == rw &&
 			    req->sector + req->nr_sectors == sector &&
@@ -358,7 +373,7 @@
 				return;
 			}
 
-			if (req->dev == bh->b_dev &&
+			if (req->dev == dev &&
 			    !req->sem &&
 			    req->cmd == rw &&
 			    req->sector - count == sector &&
@@ -380,21 +395,22 @@
 	}
 
 /* find an unused request. */
-	req = get_request(max_req, bh->b_dev);
+	req = get_request(max_req, dev);
 	sti();
 
 /* if no request available: if rw_ahead, forget it; otherwise try again blocking.. */
 	if (!req) {
-		if (rw_ahead) {
+		if (rw_ahead && bh->b_dev==bh->b_rdev) {
 			unlock_buffer(bh);
 			return;
 		}
-		req = __get_request_wait(max_req, bh->b_dev);
+		req = __get_request_wait(max_req, dev);
 	}
 
 /* fill up the request-info, and add it to the queue */
 	req->cmd = rw;
 	req->errors = 0;
+	req->shared_count = 0;
 	req->sector = sector;
 	req->nr_sectors = count;
 	req->current_nr_sectors = count;
@@ -406,6 +422,147 @@
 	add_request(major+blk_dev,req);
 }
 
+#ifdef CONFIG_BLK_DEV_MD
+
+/* make_double_request does the same thing as make_request,
+ * except that it runs on 2 devices at the same time. This is mandatory
+ * since RAID-1 uses 2 write requests for a single buffer. Note that
+ * using both functions on the same dev with the same command (i.e.
+ * READ or WRITE) will lead to a nice panic, because of shared buffer
+ * list. Checks have been removed, so special care should be taken
+ * when calling this function. */
+void make_double_request(dev_t dev0, dev_t dev1, unsigned int sector,
+			 int rw, struct buffer_head * bh)
+{
+  unsigned int count;
+  struct request * req, *newreq[2];
+  int max_req, i, major;
+  dev_t dev[2]={dev0, dev1};
+  enum
+  {
+    NEW, TAIL, HEAD,
+  } pos[2];
+  
+  max_req = (rw == READ) ? NR_REQUEST : ((NR_REQUEST*2)/3);
+  count = bh->b_size >> 9;  
+  pos[0]=pos[1]=NEW;
+  
+  cli();
+  
+  for (i=0; i<2; i++)
+  {
+    major=MAJOR(dev[i]);
+    if ((   major == IDE0_MAJOR	/* same as HD_MAJOR */
+	 || major == IDE1_MAJOR
+	 || major == MD_MAJOR
+	 || major == FLOPPY_MAJOR
+	 || major == SCSI_DISK_MAJOR
+	 || major == SCSI_CDROM_MAJOR)
+	&& (req = blk_dev[major].current_request))
+    {
+#ifdef CONFIG_BLK_DEV_HD
+      if (major == HD_MAJOR || major == FLOPPY_MAJOR)
+#else
+      if (major == FLOPPY_MAJOR)
+#endif CONFIG_BLK_DEV_HD
+	req = req->next;
+      
+      while (req && pos[i]==NEW)
+      {
+	if (req->dev == dev[i] &&
+	    !req->sem &&
+	    req->cmd == rw &&
+	    req->sector + req->nr_sectors == sector &&
+	    req->nr_sectors < 244)
+	{
+	  newreq[i]=req;
+	  pos[i]=TAIL;
+	  continue;
+	}
+	
+	if (req->dev == dev[i] &&
+	    !req->sem &&
+	    req->cmd == rw &&
+	    req->sector - count == sector &&
+	    req->nr_sectors < 244)
+	{
+	  newreq[i]=req;
+	  pos[i]=HEAD;
+	  continue;
+	}
+	
+	req = req->next;
+      }
+    }
+    
+    if (pos[0]==NEW)	/* Do not look at second dev if first failed */
+      break;
+  }
+  
+  if (pos[0]!=pos[1])
+  pos[0]=pos[1]=NEW;
+  
+  bh->b_reqshared=NULL;
+
+  cli ();
+  
+  switch (pos[0])
+  {
+    case NEW:
+    newreq[0] = get_request_wait (max_req, dev[0]);
+    newreq[1] = get_request_wait (max_req, dev[1]);
+    
+    newreq[0]->cmd = newreq[1]->cmd = rw;
+    newreq[0]->errors = newreq[1]->errors = 0;
+    newreq[0]->sector = newreq[1]->sector = sector;
+    newreq[0]->nr_sectors = newreq[1]->nr_sectors = count;
+    newreq[0]->current_nr_sectors = newreq[1]->current_nr_sectors = count;
+    newreq[0]->buffer = newreq[1]->buffer = bh->b_data;
+    newreq[0]->sem = newreq[1]->sem = NULL;
+    newreq[0]->bh = newreq[1]->bh = bh;
+    newreq[0]->bhtail = newreq[1]->bhtail = bh;
+    newreq[0]->next = newreq[1]->next = NULL;
+    newreq[0]->shared_count = 0;
+    newreq[1]->shared_count = 1;
+    bh->b_sister_req = newreq[1];
+    bh->b_dirt=2;		/* mark_buffer_clean is called twice... */
+    
+    add_request(MAJOR(dev[0])+blk_dev,newreq[0]);
+    cli ();
+    add_request(MAJOR(dev[1])+blk_dev,newreq[1]);
+    break;
+    
+    case TAIL:
+    newreq[0]->bhtail->b_reqnext =
+      newreq[0]->bhtail->b_reqshared =
+	newreq[1]->bhtail->b_reqnext =
+	  newreq[1]->bhtail->b_reqshared = bh;
+    newreq[0]->bhtail = newreq[1]->bhtail = bh;
+    newreq[0]->nr_sectors += count;
+    newreq[1]->nr_sectors += count;
+    bh->b_sister_req = newreq[1];
+    newreq[1]->shared_count++;
+    mark_buffer_clean(bh);
+    break;
+    
+    case HEAD:
+    bh->b_reqshared = bh->b_reqnext = newreq[0]->bh;
+    newreq[0]->nr_sectors += count;
+    newreq[1]->nr_sectors += count;
+    newreq[0]->buffer = newreq[1]->buffer = bh->b_data;
+    newreq[0]->current_nr_sectors = newreq[1]->current_nr_sectors = count;
+    newreq[0]->sector = newreq[1]->sector = sector;
+    bh->b_sister_req = newreq[1];
+    newreq[1]->shared_count++;
+    mark_buffer_clean(bh);
+    newreq[0]->bh = bh;
+    newreq[1]->bh = bh;
+  }
+  sti ();
+}
+
+#endif
+
 void ll_rw_page(int rw, int dev, unsigned long page, char * buffer)
 {
 	struct request * req;
@@ -427,6 +584,7 @@
 /* fill up the request-info, and add it to the queue */
 	req->cmd = rw;
 	req->errors = 0;
+	req->shared_count = 0;
 	req->sector = sector;
 	req->nr_sectors = PAGE_SIZE / 512;
 	req->current_nr_sectors = PAGE_SIZE / 512;
@@ -444,7 +602,7 @@
 
 void ll_rw_block(int rw, int nr, struct buffer_head * bh[])
 {
-	unsigned int major;
+	unsigned int major, sector;
 	struct request plug;
 	int correct_size;
 	struct blk_dev_struct * dev;
@@ -500,7 +658,13 @@
 	for (i = 0; i < nr; i++) {
 		if (bh[i]) {
 			bh[i]->b_req = 1;
-			make_request(major, rw, bh[i]);
+			
+			/* MD needs this for redirection and error recovery */
+			bh[i]->b_rdev = bh[i]->b_dev;
+			bh[i]->b_rw = rw;
+
+			sector = bh[i]->b_blocknr * (bh[i]->b_size >> 9);
+			make_request(bh[i]->b_dev, sector, rw, bh[i]);
 			if (rw == READ || rw == READA)
 				kstat.pgpgin++;
 			else
@@ -557,6 +721,7 @@
 			}
 			req[j]->cmd = rw;
 			req[j]->errors = 0;
+			req[j]->shared_count = 0;
 			req[j]->sector = (b[i] * buffersize) >> 9;
 			req[j]->nr_sectors = buffersize >> 9;
 			req[j]->current_nr_sectors = buffersize >> 9;
@@ -612,7 +777,10 @@
 #ifdef CONFIG_SBPCD
 	mem_start = sbpcd_init(mem_start, mem_end);
 #endif CONFIG_SBPCD
-	if (ramdisk_size)
+#ifdef CONFIG_BLK_DEV_MD
+	mem_start = md_init(mem_start, mem_end);
+#endif CONFIG_BLK_DEV_MD
+	  if (ramdisk_size)
 		mem_start += rd_init(mem_start, ramdisk_size*1024);
 	return mem_start;
 }
diff -ru --new-file /usr/src/linux/drivers/block/md.c linux/drivers/block/md.c
--- /usr/src/linux/drivers/block/md.c
+++ linux/drivers/block/md.c	Fri Jun 30 19:08:03 1995
@@ -0,0 +1,640 @@
+
+/*
+   md.c : Multiple Devices driver for Linux
+          Copyright (C) 1994, 1995 Marc ZYNGIER
+	  <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	  <maz@gloups.fdn.fr>
+
+   A lot of inspiration came from hd.c ...
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#include <linux/autoconf.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/malloc.h>
+#include <linux/mm.h>
+#include <linux/md.h>
+#include <linux/hdreg.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <errno.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+
+#include "blk.h"
+
+static struct hd_struct md_hd_struct[MAX_MD_DEV];
+static int md_blocksizes[MAX_MD_DEV];
+
+int md_size[MAX_MD_DEV]={0, };
+struct request *md_cur_req=NULL;
+
+static void md_geninit (void);
+
+static struct gendisk md_gendisk=
+{
+  MD_MAJOR,
+  "md",
+  0,
+  1,
+  MAX_MD_DEV,
+  md_geninit,
+  md_hd_struct,
+  md_size,
+  MAX_MD_DEV,
+  NULL,
+  NULL
+};
+
+static struct md_personality *pers[MAX_PERSONALITY]={NULL, };
+
+struct real_dev devices[MAX_MD_DEV][MAX_REAL];
+struct md_dev md_dev[MAX_MD_DEV];
+
+static struct gendisk *find_gendisk (dev_t dev)
+{
+  struct gendisk *tmp=gendisk_head;
+
+  while (tmp != NULL)
+  {
+    if (tmp->major==MAJOR(dev))
+      return (tmp);
+    
+    tmp=tmp->next;
+  }
+
+  return (NULL);
+}
+
+
+/* Picked up from genhd.c */
+char *partition_name (dev_t dev)
+{
+  static char name[10];		/* This should be long
+				   enough for a device name ! */
+  struct gendisk *hd=find_gendisk (dev);
+  char base_name;
+  int minor=MINOR(dev);
+
+  if (!hd)
+  {
+    printk ("No gendisk entry for dev %04x\n", dev);
+    sprintf (name, "dev %04x", dev);
+    return (name);
+  }
+
+  base_name = (hd->major == IDE1_MAJOR) ? 'c' : 'a';
+  sprintf(name, "%s%c%d",
+	  hd->major_name,
+	  base_name + (minor >> hd->minor_shift),
+	  minor & ((1 << hd->minor_shift) - 1));
+  return (name);
+}
+
+
+static void set_ra (void)
+{
+  int i, j, current=INT_MAX;
+
+  for (i=0; i<MAX_MD_DEV; i++)
+  {
+    if (!md_dev[i].pers)
+      continue;
+    
+    for (j=0; j<MAX_REAL; j++)
+      if (read_ahead[MAJOR(devices[i][j].dev)]<current)
+	current=read_ahead[MAJOR(devices[i][j].dev)];
+  }
+  
+  read_ahead[MD_MAJOR]=current;
+}
+
+
+static int md_ioctl (struct inode *inode, struct file *file,
+                     unsigned int cmd, unsigned long arg)
+{
+  int minor, index, err, current_ra;
+  struct gendisk *gen_real;
+  struct hd_geometry *loc = (struct hd_geometry *) arg;
+
+  if (!suser())
+    return -EACCES;
+
+  if (((minor=MINOR(inode->i_rdev)) & 0x80) &&
+      (minor & 0x7f) < MAX_PERSONALITY &&
+      pers[minor & 0x7f] &&
+      pers[minor & 0x7f]->ioctl)
+    return (pers[minor & 0x7f]->ioctl (inode, file, cmd, arg));
+  
+  if (minor >= MAX_MD_DEV)
+    return -EINVAL;
+
+  switch (cmd)
+  {
+    case REGISTER_DEV:
+    if (MAJOR((dev_t) arg)==MD_MAJOR || md_dev[minor].nb_dev==MAX_REAL)
+      return -EINVAL;
+
+    if (!fs_may_mount ((dev_t) arg) || md_dev[minor].pers)
+      return -EBUSY;
+
+    if (!(gen_real=find_gendisk ((dev_t) arg)))
+      return -ENOENT;
+
+    index=md_dev[minor].nb_dev++;
+    devices[minor][index].dev=(dev_t) arg;
+
+    /* Lock the device by inserting a dummy inode. This doesn't
+       smell very good, but I need to be consistent with the
+       mount stuff, especially with fs_may_mount. If someone has
+       a better idea, please help ! */
+    
+    devices[minor][index].inode=get_empty_inode ();
+    devices[minor][index].inode->i_dev=(dev_t) arg; /* don't care about
+						       other fields */
+    insert_inode_hash (devices[minor][index].inode);
+    
+    /* Device sizes are rounded to a multiple of a page (needed for
+       paging). This is NOT done by fdisk when partitioning,
+       but that's a DOS thing anyway... */
+    
+    devices[minor][index].size=gen_real->sizes[MINOR((dev_t) arg)] & (PAGE_MASK>>10);
+    devices[minor][index].offset=index ?
+      (devices[minor][index-1].offset + devices[minor][index-1].size) : 0;
+
+    if (!index)
+      md_size[minor]=devices[minor][index].size;
+    else
+      md_size[minor]+=devices[minor][index].size;
+
+    printk("REGISTER_DEV %s to md%x done\n", partition_name((dev_t) arg), minor);
+    break;
+
+    case START_MD:
+    if (!md_dev[minor].nb_dev)
+      return -EINVAL;
+
+    if (md_dev[minor].pers)
+      return -EBUSY;
+
+    md_dev[minor].repartition=(int) arg;
+    
+    if ((index=PERSONALITY(md_dev+minor) >> (PERSONALITY_SHIFT))
+	>= MAX_PERSONALITY ||
+	!pers[index])
+      return -EINVAL;
+
+    md_dev[minor].pers=pers[index];
+
+    if ((err=md_dev[minor].pers->run (minor, md_dev+minor)))
+    {
+      md_dev[minor].pers=NULL;
+      return (err);
+    }
+
+    /* FIXME : We assume here we have blocks
+       that are twice as large as sectors.
+       THIS MAY NOT BE TRUE !!! */
+    md_hd_struct[minor].start_sect=0;
+    md_hd_struct[minor].nr_sects=md_size[minor]<<1;
+
+    /* It would be better to have a per-md-dev read_ahead. Currently,
+       we only use the smallest read_ahead among md-attached devices */
+
+    current_ra=read_ahead[MD_MAJOR];
+    
+    for (index=0; index<md_dev[minor].nb_dev; index++)
+    {
+      if (current_ra>read_ahead[MAJOR(devices[minor][index].dev)])
+	current_ra=read_ahead[MAJOR(devices[minor][index].dev)];
+
+      devices[minor][index].fault_count=0;
+      devices[minor][index].invalid=VALID;
+    }
+
+    read_ahead[MD_MAJOR]=current_ra;
+
+    printk ("START_DEV md%x %s\n", minor, md_dev[minor].pers->name);
+    break;
+
+    case STOP_MD:
+    if (inode->i_count>1 || md_dev[minor].busy>1) /* ioctl : one open channel */
+    {
+      printk ("STOP_MD md%x failed : i_count=%d, busy=%d\n", minor, inode->i_count, md_dev[minor].busy);
+      return -EBUSY;
+    }
+
+    if (md_dev[minor].pers)
+    {
+      /*  The device won't exist anymore -> flush it now */
+      fsync_dev (inode->i_rdev);
+      invalidate_buffers (inode->i_rdev);
+      md_dev[minor].pers->stop (minor, md_dev+minor);
+    }
+
+    /* Remove locks. */
+    for (index=0; index<md_dev[minor].nb_dev; index++)
+      clear_inode (devices[minor][index].inode);
+
+    md_dev[minor].nb_dev=md_size[minor]=0;
+    md_dev[minor].pers=NULL;
+
+    set_ra ();			/* calculate new read_ahead */
+    
+    printk ("STOP_DEV md%x\n", minor);
+    break;
+
+    case MD_INVALID:
+    if (!(err=md_valid_device (minor, (dev_t) arg, INVALID_ALWAYS)))
+      printk ("md%d : %s disabled\n", minor, partition_name ((dev_t) arg));
+
+    return (err);
+
+    case MD_VALID:
+    if (!(err=md_valid_device (minor, (dev_t) arg, VALID)))
+      printk ("md%d : %s enabled\n", minor, partition_name ((dev_t) arg));
+
+    return (err);
+    
+    case BLKGETSIZE:   /* Return device size */
+    if  (!arg)  return -EINVAL;
+    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
+    if (err)
+      return err;
+    put_user (md_hd_struct[MINOR(inode->i_rdev)].nr_sects, (long *) arg);
+    break;
+
+    case BLKFLSBUF:
+    fsync_dev (inode->i_rdev);
+    invalidate_buffers (inode->i_rdev);
+    break;
+
+    case BLKRASET:
+    if (arg > 0xff)
+      return -EINVAL;
+    read_ahead[MAJOR(inode->i_rdev)] = arg;
+    return 0;
+    
+    case BLKRAGET:
+    if  (!arg)  return -EINVAL;
+    err=verify_area (VERIFY_WRITE, (long *) arg, sizeof(long));
+    if (err)
+      return err;
+    put_user (read_ahead[MAJOR(inode->i_rdev)], (long *) arg);
+    break;
+
+    case HDIO_GETGEO:
+    if (!loc)  return -EINVAL;
+    err = verify_area(VERIFY_WRITE, loc, sizeof(*loc));
+    if (err)
+      return err;
+    put_user (2, (char *) &loc->heads);
+    put_user (4, (char *) &loc->sectors);
+    put_user (md_hd_struct[minor].nr_sects/8, (short *) &loc->cylinders);
+    put_user (md_hd_struct[MINOR(inode->i_rdev)].start_sect,
+		(long *) &loc->start);
+    break;
+    
+    default:
+    printk ("Unknown md_ioctl %d\n", cmd);
+    return -EINVAL;
+  }
+
+  return (0);
+}
+
+
+static int md_open (struct inode *inode, struct file *file)
+{
+  int minor=MINOR(inode->i_rdev);
+
+  md_dev[minor].busy++;
+  return (0);			/* Always succeed */
+}
+
+
+static void md_release (struct inode *inode, struct file *file)
+{
+  int minor=MINOR(inode->i_rdev);
+
+  sync_dev (inode->i_rdev);
+  md_dev[minor].busy--;
+}
+
+
+static struct file_operations md_fops=
+{
+  NULL,
+  block_read,
+  block_write,
+  NULL,
+  NULL,
+  md_ioctl,
+  NULL,
+  md_open,
+  md_release,
+  block_fsync
+};
+
+
+static inline int remap_request (int minor, struct request *req)
+{
+  if (!md_dev[minor].pers)
+  {
+    printk ("Oops ! md%d not running, giving up !\n", minor);
+    return -1;
+  }
+
+  return (md_dev[minor].pers->map(minor, md_dev+minor, req));
+}
+
+static void do_md_request (void)
+{
+  int minor;
+  struct request *req;
+
+  while (1)
+  {
+    /* Adapted from ide.c for early request removal */
+    if ((req = md_cur_req) == NULL)
+    {
+      req = blk_dev[MD_MAJOR].current_request;
+      if ((req == NULL) || (req->dev == -1))
+	return;
+    }
+    
+    blk_dev[MD_MAJOR].current_request = req->next;
+    md_cur_req = req;
+
+    minor = MINOR(req->dev);
+    if ((MAJOR(req->dev) != MD_MAJOR) || (minor >= MAX_REAL))
+    {
+      printk("md: bad device number: 0x%04x\n", req->dev);
+      end_request(0);
+      continue;
+    }
+    
+    if (req->bh && !req->bh->b_lock)
+    {
+      printk("md%d: block not locked\n", minor);
+      end_request(0);
+      continue;
+    }
+
+    switch (remap_request (minor, req))
+    {
+      case -1:
+      end_request (0);		/* Fail the usual way */
+      break;
+
+      case 0:			/* All right, redirection was successful */
+      break;
+
+      default:
+      end_request (1);		/* Recovered error using redundancy */
+    }
+  }
+}
+
+
+static struct symbol_table md_symbol_table=
+{
+#include <linux/symtab_begin.h>
+  X(devices),
+  X(md_size),
+  X(md_cur_req),
+  X(make_request),
+  X(add_request),
+  X(make_double_request),
+  X(register_md_personality),
+  X(unregister_md_personality),
+  X(partition_name),
+  X(md_valid_device),
+  X(md_can_reemit),
+#include <linux/symtab_end.h>
+};
+
+
+static void md_geninit (void)
+{
+  int i;
+  
+  for(i=0;i<MAX_MD_DEV;i++)
+  {
+    md_blocksizes[i] = 1024;
+    md_gendisk.part[i].start_sect=-1;
+    md_dev[i].pers=NULL;
+  }
+
+  blksize_size[MAJOR_NR] = md_blocksizes;
+  register_symtab (&md_symbol_table);
+}
+
+
+int get_md_status (char *page)
+{
+  int sz=0, i, j;
+
+  sz+=sprintf( page+sz, "Personalities : ");
+  for (i=0; i<MAX_PERSONALITY; i++)
+    if (pers[i])
+      sz+=sprintf (page+sz, "[%d %s] ", i, pers[i]->name);
+
+  page[sz-1]='\n';
+
+  sz+=sprintf (page+sz, "read_ahead ");
+  if (read_ahead[MD_MAJOR]==INT_MAX)
+    sz+=sprintf (page+sz, "not set\n");
+  else
+    sz+=sprintf (page+sz, "%d sectors\n", read_ahead[MD_MAJOR]);
+  
+  for (i=0; i<MAX_MD_DEV; i++)
+  {
+    sz+=sprintf (page+sz, "md%d : %sactive", i, md_dev[i].pers ? "" : "in");
+
+    if (md_dev[i].pers)
+      sz+=sprintf (page+sz, " %s", md_dev[i].pers->name);
+
+    for (j=0; j<md_dev[i].nb_dev; j++)
+      sz+=sprintf (page+sz, " %s%s%s",
+		   (devices[i][j].invalid==VALID) ? "" : "(",
+		   partition_name(devices[i][j].dev),
+		   (devices[i][j].invalid==VALID) ? "" : ")");
+    
+    if (md_dev[i].nb_dev)
+      sz+=sprintf (page+sz, " %d blocks", md_size[i]);
+
+    if (!md_dev[i].pers)
+    {
+      sz+=sprintf (page+sz, "\n");
+      continue;
+    }
+
+    sz+=sprintf (page+sz, " %dk chunks", 1 << FACTOR_SHIFT(FACTOR(md_dev+i)));
+    if (md_dev[i].pers->max_invalid_dev)
+      sz+=sprintf (page+sz, " maxfault=%ld\n", MAX_FAULT(md_dev+i));
+    else
+      sz+=sprintf (page+sz, "\n");
+
+    sz+=md_dev[i].pers->status (page+sz, i, md_dev+i);
+  }
+  
+  return (sz);
+}
+
+int md_valid_device (int minor, dev_t dev, int mode)
+{
+  int i;
+
+  for (i=0; i<md_dev[minor].nb_dev; i++)
+    if (devices[minor][i].dev==dev)
+      break;
+
+  if (i>=md_dev[minor].nb_dev)
+  {
+    printk ("Oops, dev %04x not found in md_valid_device\n", dev);
+    return -EINVAL;
+  }
+
+  switch (mode)
+  {
+    case VALID:
+    /* Don't consider INVALID_NEXT as a real invalidation.
+       Maybe that's not the good way to treat such a thing,
+       we'll see. */
+    if (devices[minor][i].invalid==INVALID_ALWAYS)
+    {
+      devices[minor][i].fault_count=0; /* reset fault count */
+      if (md_dev[minor].invalid_dev_count)
+	md_dev[minor].invalid_dev_count--;
+    }
+    break;
+
+    case INVALID:
+    if (devices[minor][i].invalid != VALID )
+      return 0;			/* Don't invalidate twice */
+    
+    if (++devices[minor][i].fault_count > MAX_FAULT(md_dev+minor) &&
+	MAX_FAULT(md_dev+minor)!=0xFF)
+    {
+      /* We cannot tolerate this fault.
+	 So sing a song, and say GoodBye to this device... */
+      
+      mode=INVALID_ALWAYS;
+      md_dev[minor].invalid_dev_count++;
+    }
+    else
+      /* FIXME :
+	 If we reached the max_invalid_dev count, doing one
+	 more invalidation will kill the md_dev. So we choose
+	 not to invalid the physical dev in such a case. But
+	 next access will probably fail... */
+      if (md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev)
+	mode=INVALID_NEXT;
+      else
+	mode=VALID;
+    break;
+
+    case INVALID_ALWAYS:	/* Only used via MD_INVALID ioctl */
+    md_dev[minor].invalid_dev_count++;
+  }
+  
+  devices[minor][i].invalid=mode;
+  return 0;
+}
+
+
+int md_can_reemit (int minor)
+{
+  /* FIXME :
+     If the device is raid-1 (md_dev[minor].pers->max_invalid_dev=-1),
+     always pretend that we can reemit the request.
+     Problem : if the 2 devices in the pair are dead, will loop
+     forever. Maybe having a per-personality can_reemit function would
+     help. */
+
+  if (!md_dev[minor].pers)
+    return (0);
+  
+  return(md_dev[minor].pers->max_invalid_dev &&
+	 ((md_dev[minor].pers->max_invalid_dev==-1) ?
+	 1 :
+	 md_dev[minor].invalid_dev_count<=md_dev[minor].pers->max_invalid_dev));
+}
+
+
+int register_md_personality (int p_num, struct md_personality *p)
+{
+  int i=(p_num >> PERSONALITY_SHIFT);
+
+  if (i >= MAX_PERSONALITY)
+    return -EINVAL;
+
+  if (pers[i])
+    return -EBUSY;
+  
+  pers[i]=p;
+  printk ("%s personality registered\n", p->name);
+  return 0;
+}
+
+int unregister_md_personality (int p_num)
+{
+  int i=(p_num >> PERSONALITY_SHIFT);
+
+  if (i >= MAX_PERSONALITY || !pers[i])
+    return -EINVAL;
+
+  printk ("%s personality unregistered\n", pers[i]->name);
+  pers[i]=NULL;
+  return 0;
+}
+
+void linear_init (void);
+void raid0_init (void);
+void raid1_init (void);
+void raid5_init (void);
+
+unsigned long md_init (unsigned long mem_start,
+		       unsigned long mem_long)
+{
+  printk ("md driver %s MAX_MD_DEV=%d, MAX_REAL=%d\n", MD_VERSION, MAX_MD_DEV, MAX_REAL);
+
+  if (register_blkdev (MD_MAJOR, "md", &md_fops))
+  {
+    printk ("Unable to get major %d for md\n", MD_MAJOR);
+    return (mem_start);
+  }
+
+  blk_dev[MD_MAJOR].request_fn=DEVICE_REQUEST;
+  blk_dev[MD_MAJOR].current_request=NULL;
+  read_ahead[MD_MAJOR]=INT_MAX;
+  md_gendisk.next=gendisk_head;
+
+  gendisk_head=&md_gendisk;
+
+#ifdef CONFIG_MD_LINEAR
+  linear_init ();
+#endif
+#ifdef CONFIG_MD_STRIPED
+  raid0_init ();
+#endif
+#ifdef CONFIG_MD_RAID1
+  raid1_init ();
+#endif
+#ifdef CONFIG_MD_RAID5
+  raid5_init ();
+#endif
+  
+  return (mem_start);
+}
diff -ru --new-file /usr/src/linux/drivers/block/raid0.c linux/drivers/block/raid0.c
--- /usr/src/linux/drivers/block/raid0.c
+++ linux/drivers/block/raid0.c	Fri Jun 30 18:50:18 1995
@@ -0,0 +1,307 @@
+
+/*
+   raid0.c : Multiple Devices driver for Linux
+             Copyright (C) 1994, 1995 Marc ZYNGIER
+	     <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	     <maz@gloups.fdn.fr>
+
+   RAID-0 management functions.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#include <linux/md.h>
+#include <linux/raid0.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include "blk.h"
+
+static void create_strip_zones (int minor, struct md_dev *mddev)
+{
+  int i, j, c=0;
+  int current_offset=0;
+  struct real_dev *smallest_by_zone;
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  
+  data->nr_strip_zones=1;
+  
+  for (i=1; i<mddev->nb_dev; i++)
+  {
+    for (j=0; j<i; j++)
+      if (devices[minor][i].size==devices[minor][j].size)
+      {
+	c=1;
+	break;
+      }
+
+    if (!c)
+      data->nr_strip_zones++;
+
+    c=0;
+  }
+
+  data->strip_zone=kmalloc (sizeof(struct strip_zone)*data->nr_strip_zones,
+			      GFP_KERNEL);
+
+  data->smallest=NULL;
+  
+  for (i=0; i<data->nr_strip_zones; i++)
+  {
+    data->strip_zone[i].dev_offset=current_offset;
+    smallest_by_zone=NULL;
+    c=0;
+
+    for (j=0; j<mddev->nb_dev; j++)
+      if (devices[minor][j].size>current_offset)
+      {
+	data->strip_zone[i].dev[c++]=devices[minor]+j;
+	if (!smallest_by_zone ||
+	    smallest_by_zone->size > devices[minor][j].size)
+	  smallest_by_zone=devices[minor]+j;
+      }
+
+    data->strip_zone[i].nb_dev=c;
+    data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;
+
+    if (!data->smallest ||
+	data->smallest->size > data->strip_zone[i].size)
+      data->smallest=data->strip_zone+i;
+
+    data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
+					   data->strip_zone[i-1].size) : 0;
+    current_offset=smallest_by_zone->size;
+  }
+}
+
+static int raid0_run (int minor, struct md_dev *mddev)
+{
+  int current=0, i=0, size, zone0_size, nb_zone, min;
+  struct raid0_data *data;
+
+  min=1 << FACTOR_SHIFT(FACTOR(mddev));
+
+  for (i=0; i<mddev->nb_dev; i++)
+    if (devices[minor][i].size<min)
+    {
+      printk ("Cannot use %dk chunks on dev %s\n", min,
+	      partition_name (devices[minor][i].dev));
+      return -EINVAL;
+    }
+  
+  MOD_INC_USE_COUNT;
+  
+  /* Resize devices according to the factor */
+  md_size[minor]=0;
+  
+  for (i=0; i<mddev->nb_dev; i++)
+  {
+    devices[minor][i].size &= ~((1 << FACTOR_SHIFT(FACTOR(mddev))) - 1);
+    md_size[minor] += devices[minor][i].size;
+  }
+
+  mddev->private=kmalloc (sizeof (struct raid0_data), GFP_KERNEL);
+  data=(struct raid0_data *) mddev->private;
+  
+  create_strip_zones (minor, mddev);
+
+  nb_zone=data->nr_zones=
+    md_size[minor]/data->smallest->size +
+    (md_size[minor]%data->smallest->size ? 1 : 0);
+  
+  data->hash_table=kmalloc (sizeof (struct raid0_hash)*nb_zone, GFP_KERNEL);
+
+  size=data->strip_zone[current].size;
+
+  i=0;
+  while (current<data->nr_strip_zones)
+  {
+    data->hash_table[i].zone0=data->strip_zone+current;
+
+    if (size>=data->smallest->size)/* If we completely fill the slot */
+    {
+      data->hash_table[i++].zone1=NULL;
+      size-=data->smallest->size;
+
+      if (!size)
+      {
+	if (++current==data->nr_strip_zones) continue;
+	size=data->strip_zone[current].size;
+      }
+
+      continue;
+    }
+
+    if (++current==data->nr_strip_zones) /* Last dev, set unit1 as NULL */
+    {
+      data->hash_table[i].zone1=NULL;
+      continue;
+    }
+
+    zone0_size=size;		/* Here, we use a 2nd dev to fill the slot */
+    size=data->strip_zone[current].size;
+    data->hash_table[i++].zone1=data->strip_zone+current;
+    size-=(data->smallest->size - zone0_size);
+  }
+
+  return (0);
+}
+
+
+static int raid0_stop (int minor, struct md_dev *mddev)
+{
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+
+  kfree (data->hash_table);
+  kfree (data->strip_zone);
+  kfree (data);
+
+  MOD_DEC_USE_COUNT;
+  return 0;
+}
+
+
+static int raid0_map (int minor, struct md_dev *mddev, struct request *req)
+{
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  static struct raid0_hash *hash;
+  struct strip_zone *zone;
+  struct real_dev *tmp_dev;
+  int blk_in_chunk, factor, chunk, rw;
+  long block, rblock;
+  struct buffer_head *bh;
+
+  factor=FACTOR(mddev);
+  block=req->sector >> 1;
+  hash=data->hash_table+(block/data->smallest->size);
+
+  if (block >= (hash->zone0->size +
+		hash->zone0->zone_offset))
+  {
+    if (!hash->zone1)
+      printk ("raid0_map : hash->zone1==NULL for block %ld\n", block);
+    zone=hash->zone1;
+  }
+  else
+    zone=hash->zone0;
+  
+  blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
+  chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
+  tmp_dev=zone->dev[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
+  rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
+
+  if ((bh=req->bh))		/* This a buffer request */
+  {
+    if (req->cmd==WRITE)
+      mark_buffer_dirty (bh, 0);/* re-dirtyfy buffer ! */
+
+    rw=req->cmd;
+    end_redirect ();		/* Must be done BEFORE redirection ! */
+    bh->b_rdev=tmp_dev->dev;
+    make_request (tmp_dev->dev, rblock*(bh->b_size>>9), rw, bh);
+  }
+  else				/* This is a paging request */
+  {
+    md_cur_req=NULL;
+    req->dev=tmp_dev->dev;
+    req->sector=rblock << 1;
+    add_request (blk_dev+MAJOR (tmp_dev->dev), req);
+  }
+
+  return 0;
+}
+
+
+static int raid0_status (char *page, int minor, struct md_dev *mddev)
+{
+  int sz=0;
+#define MD_DEBUG
+#ifdef MD_DEBUG
+  int j, k;
+  struct raid0_data *data=(struct raid0_data *) mddev->private;
+  
+  sz+=sprintf (page+sz, "      ");
+  for (j=0; j<data->nr_zones; j++)
+  {
+    sz+=sprintf (page+sz, "[z%d",
+		 data->hash_table[j].zone0-data->strip_zone);
+    if (data->hash_table[j].zone1)
+      sz+=sprintf (page+sz, "/z%d] ",
+		   data->hash_table[j].zone1-data->strip_zone);
+    else
+      sz+=sprintf (page+sz, "] ");
+  }
+  
+  sz+=sprintf (page+sz, "\n");
+  
+  for (j=0; j<data->nr_strip_zones; j++)
+  {
+    sz+=sprintf (page+sz, "      z%d=[", j);
+    for (k=0; k<data->strip_zone[j].nb_dev; k++)
+      sz+=sprintf (page+sz, "%s/",
+		   partition_name(data->strip_zone[j].dev[k]->dev));
+    sz--;
+    sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+		 data->strip_zone[j].zone_offset,
+		 data->strip_zone[j].dev_offset,
+		 data->strip_zone[j].size);
+  }
+#endif
+  return sz;
+}
+
+
+static struct md_personality raid0_personality=
+{
+  "raid0",
+  raid0_map,
+  raid0_run,
+  raid0_stop,
+  raid0_status,
+  NULL,				/* no ioctls */
+  0
+};
+
+
+#ifndef MODULE
+
+void raid0_init (void)
+{
+  register_md_personality (RAID0, &raid0_personality);
+}
+
+#else
+
+char kernel_version[]= UTS_RELEASE;
+
+int init_module (void)
+{
+  return (register_md_personality (RAID0, &raid0_personality));
+}
+
+void cleanup_module (void)
+{
+  if (MOD_IN_USE)
+    printk ("md raid0 : module still busy...\n");
+  else
+    unregister_md_personality (RAID0);
+}
+
+#endif
diff -ru --new-file /usr/src/linux/drivers/block/raid1.c linux/drivers/block/raid1.c
--- /usr/src/linux/drivers/block/raid1.c
+++ linux/drivers/block/raid1.c	Fri Jun 30 18:50:19 1995
@@ -0,0 +1,389 @@
+
+/*
+   raid1.c : Multiple Devices driver for Linux
+             Copyright (C) 1994, 1995 Marc ZYNGIER
+	     <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	     <maz@gloups.fdn.fr>
+
+   RAID-1 management functions.
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#ifdef MODULE
+#include <linux/module.h>
+#include <linux/version.h>
+#else
+#define MOD_INC_USE_COUNT
+#define MOD_DEC_USE_COUNT
+#endif
+#include <linux/md.h>
+#include <linux/raid1.h>
+#include <linux/malloc.h>
+
+#define MAJOR_NR MD_MAJOR
+#define MD_DRIVER
+#define MD_PERSONALITY
+
+#include "blk.h"
+
+static void create_pairs (int minor, struct md_dev *mddev)
+{
+  int i;
+  struct raid1_data *data=(struct raid1_data *) mddev->private;
+
+  md_size[minor]=0;
+
+  for (i=0; i<(mddev->nb_dev & ~1); i+=2)
+  {
+    data->pairs[i>>1].dev[0]=devices[minor]+i;
+    data->pairs[i>>1].dev[1]=devices[minor]+i+1;
+    data->pairs[i>>1].size=MIN(devices[minor][i].size,
+			       devices[minor][i+1].size);
+    data->pairs[i>>1].last_access=0;
+    md_size[minor]+=data->pairs[i>>1].size;
+  }
+
+  data->nb_pairs=mddev->nb_dev >> 1;
+  data->last_read_pair=NULL;
+}
+
+static void create_strip_zones (int minor, struct md_dev *mddev)
+{
+  int i, j, c=0;
+  int current_offset=0;
+  struct pair *smallest_by_zone;
+  struct raid1_data *data=(struct raid1_data *) mddev->private;
+
+  create_pairs (minor, mddev);
+  
+  data->nr_strip_zones=1;
+  
+  for (i=1; i<data->nb_pairs; i++)
+  {
+    for (j=0; j<i; j++)
+      if (data->pairs[i].size==data->pairs[j].size)
+      {
+	c=1;
+	break;
+      }
+
+    if (!c)
+      data->nr_strip_zones++;
+
+    c=0;
+  }
+
+  data->strip_zone=kmalloc (sizeof(struct strip_zone)*data->nr_strip_zones,
+			      GFP_KERNEL);
+
+  data->smallest=NULL;
+  
+  for (i=0; i<data->nr_strip_zones; i++)
+  {
+    data->strip_zone[i].dev_offset=current_offset;
+    smallest_by_zone=NULL;
+    c=0;
+
+    for (j=0; j<data->nb_pairs; j++)
+      if (data->pairs[j].size>current_offset)
+      {
+	data->strip_zone[i].pair[c++]=data->pairs+j;
+	if (!smallest_by_zone ||
+	    smallest_by_zone->size > data->pairs[j].size)
+	  smallest_by_zone=data->pairs+j;
+      }
+
+    data->strip_zone[i].nb_dev=c;
+    data->strip_zone[i].size=(smallest_by_zone->size-current_offset)*c;
+
+    if (!data->smallest ||
+	data->smallest->size > data->strip_zone[i].size)
+      data->smallest=data->strip_zone+i;
+
+    data->strip_zone[i].zone_offset=i ? (data->strip_zone[i-1].zone_offset+
+					   data->strip_zone[i-1].size) : 0;
+    current_offset=smallest_by_zone->size;
+  }
+}
+
+static int raid1_run (int minor, struct md_dev *mddev)
+{
+  int current=0, i=0, size, zone0_size, nb_zone, min;
+  struct raid1_data *data;
+
+  for (i=0; i<(mddev->nb_dev & ~1); i+=2)
+    if (MAJOR(devices[minor][i].dev)!=MAJOR(devices[minor][i+1].dev) &&
+	(MAJOR(devices[minor][i].dev)==SCSI_DISK_MAJOR ||
+	 MAJOR(devices[minor][i+1].dev)==SCSI_DISK_MAJOR))
+    {
+      printk ("md%d : cannot mix SCSI/non-SCSI devices in a single RAID-1 pair\n", minor);
+      return -EINVAL;
+    }
+  
+  min=1 << FACTOR_SHIFT(FACTOR(mddev));
+
+  for (i=0; i<mddev->nb_dev; i++)
+    if (devices[minor][i].size<min)
+    {
+      printk ("md%d : cannot use %dk chunks on dev %s\n", minor, min,
+	      partition_name (devices[minor][i].dev));
+      return -EINVAL;
+    }
+  
+  MOD_INC_USE_COUNT;
+  
+  /* Resize devices according to the factor */
+  for (i=0; i<mddev->nb_dev; i++)
+    devices[minor][i].size &= ~((1 << FACTOR_SHIFT(FACTOR(mddev))) - 1);
+
+  mddev->private=kmalloc (sizeof (struct raid1_data), GFP_KERNEL);
+  data=(struct raid1_data *) mddev->private;
+  
+  create_strip_zones (minor, mddev);
+
+  nb_zone=data->nr_zones=
+    md_size[minor]/data->smallest->size +
+    (md_size[minor]%data->smallest->size ? 1 : 0);
+  
+  data->hash_table=kmalloc (sizeof (struct raid1_hash)*nb_zone, GFP_KERNEL);
+
+  size=data->strip_zone[current].size;
+
+  i=0;
+  while (current<data->nr_strip_zones)
+  {
+    data->hash_table[i].zone0=data->strip_zone+current;
+
+    if (size>=data->smallest->size)/* If we completely fill the slot */
+    {
+      data->hash_table[i++].zone1=NULL;
+      size-=data->smallest->size;
+
+      if (!size)
+      {
+	if (++current==data->nr_strip_zones) continue;
+	size=data->strip_zone[current].size;
+      }
+
+      continue;
+    }
+
+    if (++current==data->nr_strip_zones) /* Last dev, set unit1 as NULL */
+    {
+      data->hash_table[i].zone1=NULL;
+      continue;
+    }
+
+    zone0_size=size;		/* Here, we use a 2nd dev to fill the slot */
+    size=data->strip_zone[current].size;
+    data->hash_table[i++].zone1=data->strip_zone+current;
+    size-=(data->smallest->size - zone0_size);
+  }
+
+  return (0);
+}
+
+
+static int raid1_stop (int minor, struct md_dev *mddev)
+{
+  struct raid1_data *data=(struct raid1_data *) mddev->private;
+
+  kfree (data->hash_table);
+  kfree (data->strip_zone);
+  kfree (data);
+
+  MOD_DEC_USE_COUNT;  
+  return 0;
+}
+
+
+static int raid1_map (int minor, struct md_dev *mddev, struct request *req)
+{
+  struct raid1_data *data=(struct raid1_data *) mddev->private;
+  static struct raid1_hash *hash;
+  struct strip_zone *zone;
+  struct pair *tmp_pair;
+  int blk_in_chunk, factor, chunk, rw, valid_count, next, i;
+  long block, rblock;
+  struct buffer_head *bh;
+
+  factor=FACTOR(mddev);
+  block=req->sector >> 1;
+  hash=data->hash_table+(block/data->smallest->size);
+
+  if (block >= (hash->zone0->size +
+		hash->zone0->zone_offset))
+  {
+    if (!hash->zone1)
+      printk ("raid1_map : hash->zone1==NULL for block %ld\n", block);
+    zone=hash->zone1;
+  }
+  else
+    zone=hash->zone0;
+  
+  blk_in_chunk=block & ((1UL << FACTOR_SHIFT(factor)) - 1);
+  chunk=(block - zone->zone_offset) / (zone->nb_dev<<FACTOR_SHIFT(factor));
+  tmp_pair=zone->pair[(block >> FACTOR_SHIFT(factor)) % zone->nb_dev];
+
+  if (!(valid_count=((tmp_pair->dev[0]->invalid!=INVALID_ALWAYS) +
+		     (tmp_pair->dev[1]->invalid!=INVALID_ALWAYS))))
+    return (-1);                /* Can't do anything for you :( */
+
+  rblock=(chunk << FACTOR_SHIFT(factor)) + blk_in_chunk + zone->dev_offset;
+
+  if ((bh=req->bh))		/* This a buffer request */
+  {
+    rw=req->cmd;
+    end_redirect ();
+    
+    switch (rw)
+    {
+      case WRITE:
+      case WRITEA:		/* Shouldn't happen */
+
+      mark_buffer_dirty (bh, 0);/* re-dirtyfy buffer ! */
+      bh->b_lock=valid_count;
+
+      if (valid_count==2)
+      {
+	bh->b_rdev=tmp_pair->dev[0]->dev; /* can only store one dev... */
+	make_double_request (tmp_pair->dev[0]->dev, tmp_pair->dev[1]->dev,
+			     rblock*(bh->b_size>>9), rw, bh);
+      }
+      else
+	for (i=0; i<2; i++)
+	  if (tmp_pair->dev[i]->invalid!=INVALID_ALWAYS)
+	  {
+	    bh->b_rdev=tmp_pair->dev[i]->dev;
+	    make_request (tmp_pair->dev[i]->dev, rblock*(bh->b_size>>9), rw, bh);
+	  }
+      
+      break;
+
+      case READ:
+      case READA:
+
+      if (tmp_pair && tmp_pair==data->last_read_pair &&
+	  ((rblock+1)==data->last_read_block ||
+	   rblock==(data->last_read_block+1)))
+	next=tmp_pair->last_access;
+      else
+	next=tmp_pair->last_access ? 0 : 1;
+      
+      if (tmp_pair->dev[next]->invalid==INVALID_NEXT)
+      {
+	if (tmp_pair->dev[next==0]->invalid==INVALID_NEXT)
+	  md_valid_device (minor, tmp_pair->dev[next]->dev, VALID);
+	else
+	{
+	  md_valid_device (minor, tmp_pair->dev[next]->dev, VALID);
+	  next=tmp_pair->last_access;
+	}
+      }
+      
+      bh->b_rdev=tmp_pair->dev[next]->dev;
+      make_request (tmp_pair->dev[next]->dev, rblock*(bh->b_size>>9), rw, bh);
+      tmp_pair->last_access=next;
+      data->last_read_pair=tmp_pair;
+      data->last_read_block=rblock;
+    }
+  }
+  else				/* This is a paging request */
+  {
+    printk ("md%d : paging NOT supported on RAID-1 device\n", minor);
+    return (-1);		/* request killer */
+  }
+
+  return 0;
+}
+
+
+static int raid1_status (char *page, int minor, struct md_dev *mddev)
+{
+  int sz=0;
+#undef MD_DEBUG
+#ifdef MD_DEBUG
+  int j, k;
+  struct raid1_data *data=(struct raid1_data *) mddev->private;
+  
+  sz+=sprintf (page+sz, "      ");
+  for (j=0; j<data->nr_zones; j++)
+  {
+    sz+=sprintf (page+sz, "[z%d",
+		 data->hash_table[j].zone0-data->strip_zone);
+    if (data->hash_table[j].zone1)
+      sz+=sprintf (page+sz, "/z%d] ",
+		   data->hash_table[j].zone1-data->strip_zone);
+    else
+      sz+=sprintf (page+sz, "] ");
+  }
+  
+  sz+=sprintf (page+sz, "\n");
+  
+  for (j=0; j<data->nr_strip_zones; j++)
+  {
+    sz+=sprintf (page+sz, "      z%d=[", j);
+    for (k=0; k<data->strip_zone[j].nb_dev; k++)
+    {
+      sz+=sprintf (page+sz, "%s+",
+		   partition_name(data->strip_zone[j].pair[k]->dev[0]->dev));
+      sz+=sprintf (page+sz, "%s/",
+		   partition_name(data->strip_zone[j].pair[k]->dev[1]->dev));
+    }
+
+    sz--;
+    sz+=sprintf (page+sz, "] zo=%d do=%d s=%d\n",
+		 data->strip_zone[j].zone_offset,
+		 data->strip_zone[j].dev_offset,
+		 data->strip_zone[j].size);
+  }
+#endif
+  return sz;
+}
+
+
+static struct md_personality raid1_personality=
+{
+  "raid1",
+  raid1_map,
+  raid1_run,
+  raid1_stop,
+  raid1_status,
+  NULL,				/* no ioctls */
+  -1
+};
+
+
+#ifndef MODULE
+
+void raid1_init (void)
+{
+  register_md_personality (RAID1, &raid1_personality);
+}
+
+#else
+
+char kernel_version[]= UTS_RELEASE;
+
+int init_module (void)
+{
+  return (register_md_personality (RAID1, &raid1_personality));
+}
+
+void cleanup_module (void)
+{
+  if (MOD_IN_USE)
+    printk ("md raid1 : module still busy...\n");
+  else
+    unregister_md_personality (RAID1);
+}
+
+#endif
diff -ru --new-file /usr/src/linux/drivers/scsi/scsi.c linux/drivers/scsi/scsi.c
--- /usr/src/linux/drivers/scsi/scsi.c	Fri Jun 30 20:01:40 1995
+++ linux/drivers/scsi/scsi.c	Fri Jun 30 18:59:06 1995
@@ -778,6 +778,9 @@
     if (req && req->dev <= 0)
 	panic("Invalid device in request_queueable");
     
+    /* See if the request is queuable when shared on a RAID-1 device */
+    if (req && req->shared_count) return NULL;
+    
     SCpnt =  device->host->host_queue;
     while(SCpnt){
 	if(SCpnt->target == device->id && SCpnt->lun == device->lun 
@@ -809,7 +812,7 @@
 	}
 	if(req->nr_sectors && bh && bh->b_reqnext){  /* Any leftovers? */
 	    SCpnt->request.bhtail = bh;
-	    req->bh = bh->b_reqnext; /* Divide request */
+	    req->bh = bh->b_reqnext ? bh->b_reqnext : bh->b_reqshared; /* Divide request */
 	    bh->b_reqnext = NULL;
 	    bh = req->bh;
 	    
@@ -874,6 +877,9 @@
     /* See if this request has already been queued by an interrupt routine */
     if (req && (dev = req->dev) <= 0) return NULL;
     
+    /* See if the request is queuable when shared on a RAID-1 device */
+    if (req && req->shared_count) return NULL;
+
     host = device->host;
     
     if (intr_count && SCSI_BLOCK(host)) return NULL;
@@ -927,7 +933,7 @@
 		}
 		if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */
 		    SCpnt->request.bhtail = bh;
-		    req->bh = bh->b_reqnext; /* Divide request */
+		    req->bh = bh->b_reqnext ? bh->b_reqnext : bh->b_reqshared; /* Divide request */
 		    bh->b_reqnext = NULL;
 		    bh = req->bh;
 		    /* Now reset things so that req looks OK */
diff -ru --new-file /usr/src/linux/drivers/scsi/scsi.h linux/drivers/scsi/scsi.h
--- /usr/src/linux/drivers/scsi/scsi.h	Fri Jun 30 20:01:40 1995
+++ linux/drivers/scsi/scsi.h	Fri Jun 30 19:00:44 1995
@@ -574,8 +574,24 @@
 	    req->bh = bh->b_reqnext;
 	    req->nr_sectors -= bh->b_size >> 9;
 	    req->sector += bh->b_size >> 9;
-	    bh->b_reqnext = NULL;
-	    bh->b_uptodate = uptodate;
+	    /* Only remove from buffer list if last access
+	       for this buffer, since raid-1 does 2 write
+	       access for a single buffer */
+	    if (bh->b_lock==1)
+		    bh->b_reqnext = NULL;
+	    else
+	    {
+		    if (!bh->b_reqnext && bh->b_reqshared)
+			    bh->b_reqnext=bh->b_reqshared;
+		    if (bh->b_sister_req && bh->b_sister_req->shared_count)
+		    {
+			    bh->b_sister_req->shared_count--;
+			    bh->b_sister_req = NULL;
+		    }
+	    }
+	    if (!(bh->b_uptodate = uptodate)) /* Strange, bh->b_req=1
+                                             if !b_uptodate...   */
+		    bh->b_rw |= 0x80;
 	    unlock_buffer(bh);
 	    sectors -= bh->b_size >> 9;
 	    if ((bh = req->bh) != NULL) {
diff -ru --new-file /usr/src/linux/fs/buffer.c linux/fs/buffer.c
--- /usr/src/linux/fs/buffer.c	Fri Jun 30 12:05:19 1995
+++ linux/fs/buffer.c	Fri Jun 30 18:50:20 1995
@@ -723,6 +723,7 @@
 	bh->b_flushtime = 0;
 	bh->b_req=0;
 	bh->b_dev=dev;
+	bh->b_rw&=0x7f;
 	bh->b_blocknr=block;
 	insert_into_queues(bh);
 	return bh;
@@ -904,6 +905,7 @@
 
 	for (nr_buffer_heads+=i=PAGE_SIZE/sizeof*bh ; i>0; i--) {
 		bh->b_next_free = unused_list;	/* only make link */
+		bh->b_rw &= 0x7f;
 		unused_list = bh++;
 	}
 }
@@ -921,6 +923,7 @@
 	bh->b_data = NULL;
 	bh->b_size = 0;
 	bh->b_req = 0;
+	bh->b_rw &= 0x7f;
 	return bh;
 }
 
@@ -945,6 +948,7 @@
 		head = bh;
 		bh->b_data = (char *) (page+offset);
 		bh->b_size = size;
+		bh->b_rw &= 0x7f;
 		bh->b_dev = 0xffff;  /* Flag as unused */
 	}
 	return head;
@@ -1069,6 +1073,7 @@
 		bh->b_uptodate = 0;
 		bh->b_req = 0;
 		bh->b_dev = dev;
+		bh->b_rw &= 0x7f;
 		bh->b_blocknr = *(p++);
 		bh->b_list = BUF_CLEAN;
 		nr_buffers++;
@@ -1501,6 +1506,7 @@
 		remove_from_queues(p);
 		p->b_dev=dev;
 		p->b_uptodate = 0;
+		p->b_rw &= 0x7f;
 		p->b_req = 0;
 		p->b_blocknr=starting_block++;
 		insert_into_queues(p);
@@ -1582,6 +1588,7 @@
 		bh->b_req = 0;
 		bh->b_dev = dev;
 		bh->b_list = BUF_CLEAN;
+		bh->b_rw &= 0x7f;
 		bh->b_blocknr = block++;
 		nr_buffers++;
 		nr_buffers_size[isize]++;
diff -ru --new-file /usr/src/linux/fs/proc/array.c linux/fs/proc/array.c
--- /usr/src/linux/fs/proc/array.c	Fri Jun 30 12:05:22 1995
+++ linux/fs/proc/array.c	Fri Jun 30 18:50:20 1995
@@ -42,6 +42,10 @@
 #include <linux/config.h>
 #include <linux/mm.h>
 
+#ifdef CONFIG_BLK_DEV_MD
+#include <linux/md.h>
+#endif
+
 #include <asm/segment.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
@@ -753,6 +757,11 @@
 
 		case PROC_IOPORTS:
 			return get_ioport_list(page);
+
+#ifdef CONFIG_BLK_DEV_MD
+		case PROC_MD:
+			return get_md_status(page);
+#endif
 	}
 	return -EBADF;
 }
diff -ru --new-file /usr/src/linux/fs/proc/root.c linux/fs/proc/root.c
--- /usr/src/linux/fs/proc/root.c	Fri Jun 30 20:01:44 1995
+++ linux/fs/proc/root.c	Fri Jun 30 18:50:20 1995
@@ -81,6 +81,9 @@
 #ifdef CONFIG_PROFILE
 	{ PROC_PROFILE,		7, "profile"},
 #endif
+#ifdef CONFIG_BLK_DEV_MD
+	{ PROC_MD,              6, "mdstat"},
+#endif
 };
 
 #define NR_ROOT_DIRENTRY ((sizeof (root_dir))/(sizeof (root_dir[0])))
diff -ru --new-file /usr/src/linux/include/linux/blkdev.h linux/include/linux/blkdev.h
--- /usr/src/linux/include/linux/blkdev.h	Tue Dec 27 07:37:13 1994
+++ linux/include/linux/blkdev.h	Fri Jun 30 18:50:20 1995
@@ -15,6 +15,7 @@
 	int dev;		/* -1 if no request */
 	int cmd;		/* READ or WRITE */
 	int errors;
+	int shared_count;
 	unsigned long sector;
 	unsigned long nr_sectors;
 	unsigned long current_nr_sectors;
@@ -39,6 +40,13 @@
 extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
 extern struct wait_queue * wait_for_request;
 extern void resetup_one_dev(struct gendisk *dev, int drive);
+
+/* md needs those functions to requeue requests */
+extern void add_request(struct blk_dev_struct * dev, struct request * req);
+extern void make_request(dev_t dev, unsigned int sector,
+			 int rw, struct buffer_head * bh);
+extern void make_double_request(dev_t dev0, dev_t dev1, unsigned int sector,
+				int rw, struct buffer_head * bh);
 
 extern int * blk_size[MAX_BLKDEV];
 
diff -ru --new-file /usr/src/linux/include/linux/fs.h linux/include/linux/fs.h
--- /usr/src/linux/include/linux/fs.h	Fri Jun 30 12:05:31 1995
+++ linux/include/linux/fs.h	Fri Jun 30 18:50:20 1995
@@ -132,7 +132,9 @@
 	unsigned long b_size;		/* block size */
 	unsigned long b_blocknr;	/* block number */
 	dev_t b_dev;			/* device (0 = free) */
+	dev_t b_rdev;		        /* Real device */
 	unsigned short b_count;		/* users using this block */
+	char b_rw;		        /* Used last for read or write */
 	unsigned char b_uptodate;
 	unsigned char b_dirt;		/* 0-clean,1-dirty */
 	unsigned char b_lock;		/* 0 - ok, 1 -locked */
@@ -149,6 +151,8 @@
 	struct buffer_head * b_next_free;
 	struct buffer_head * b_this_page;	/* circular list of buffers in one page */
 	struct buffer_head * b_reqnext;		/* request queue */
+	struct buffer_head * b_reqshared;       /* shared request queue */
+	struct request * b_sister_req;          /* pointer to sister request */
 };
 
 #include <linux/pipe_fs_i.h>
@@ -449,8 +453,7 @@
 
 extern inline void mark_buffer_clean(struct buffer_head * bh)
 {
-  if(bh->b_dirt) {
-    bh->b_dirt = 0;
+  if(bh->b_dirt && !--bh->b_dirt) {
     if(bh->b_list == BUF_DIRTY) refile_buffer(bh);
   }
 }
diff -ru --new-file /usr/src/linux/include/linux/linear.h linux/include/linux/linear.h
--- /usr/src/linux/include/linux/linear.h
+++ linux/include/linux/linear.h	Fri Jun 30 18:50:20 1995
@@ -0,0 +1,17 @@
+
+#ifndef _LINEAR_H
+#define _LINEAR_H
+
+struct linear_hash
+{
+  struct real_dev *dev0, *dev1;
+};
+
+struct linear_data
+{
+  struct linear_hash *hash_table; /* Dynamically allocated */
+  struct real_dev *smallest;
+  int nr_zones;
+};
+
+#endif
diff -ru --new-file /usr/src/linux/include/linux/locks.h linux/include/linux/locks.h
--- /usr/src/linux/include/linux/locks.h	Wed Dec  1 13:44:15 1993
+++ linux/include/linux/locks.h	Fri Jun 30 18:50:21 1995
@@ -1,6 +1,11 @@
 #ifndef _LINUX_LOCKS_H
 #define _LINUX_LOCKS_H
 
+#include <linux/config.h>
+#ifdef CONFIG_BLK_DEV_MD
+#include <linux/md.h>
+#endif
+
 /*
  * Buffer cache locking - note that interrupts may only unlock, not
  * lock buffers.
@@ -9,8 +14,52 @@
 
 extern inline void wait_on_buffer(struct buffer_head * bh)
 {
-	if (bh->b_lock)
-		__wait_on_buffer(bh);
+  /*
+     maz 03/27/95 : Started coding error recovery. The idea is to disable
+     the faulting device (at least for the next access) and to re-emit
+     the request using redundancy (if redundancy is not available, fail the
+     usual way).
+   */
+        while (1)
+	{
+	  if (bh->b_lock)
+	    __wait_on_buffer(bh);
+
+	  if (bh->b_uptodate ||
+	      MAJOR(bh->b_dev)!=MD_MAJOR ||
+	      !(bh->b_rw & 0x80))
+	  {
+	    bh->b_rw &= 0x7f;
+	    return;
+	  }
+	  
+#ifdef CONFIG_BLK_DEV_MD
+	  if (!md_can_reemit (MINOR(bh->b_dev)))
+	  {
+	    printk ("Cannot reemit #1 blk %ld dev %04x rw %d\n",
+		    bh->b_blocknr, bh->b_dev, bh->b_rw);
+	    bh->b_rw &= 0x7f;
+	    return;
+	  }
+
+	  md_valid_device (MINOR(bh->b_dev), bh->b_rdev, INVALID);
+	  if (!md_can_reemit (MINOR(bh->b_dev)))
+	  {
+	    printk ("Cannot reemit #2 blk %ld dev %04x rw %d\n",
+		    bh->b_blocknr, bh->b_dev, bh->b_rw);
+	    bh->b_rw &= 0x7f;
+	    return;
+	  }
+
+	  /* Right here, we know we can reemit the request.
+	     So call ll_rw_block one more time. */
+	  printk ("Reemit blk %ld dev %04x rw %d\n",
+		  bh->b_blocknr, bh->b_dev, bh->b_rw);
+	  if (bh->b_rw==WRITE || bh->b_rw==WRITEA)
+	    mark_buffer_dirty (bh, 0);
+	  ll_rw_block (bh->b_rw & 0x7f, 1, &bh);
+#endif
+	}
 }
 
 extern inline void lock_buffer(struct buffer_head * bh)
@@ -22,8 +71,21 @@
 
 extern inline void unlock_buffer(struct buffer_head * bh)
 {
-	bh->b_lock = 0;
-	wake_up(&bh->b_wait);
+  /*
+   * bh->b_lock now behaves as a counter instead
+   * of a simple flag. So we can have multiple
+   * locks on a single buffer. MD needs it for RAID-1
+   */
+     
+  if (bh->b_lock && --bh->b_lock)
+  {
+/*    printk ("Down lock blk %ld\n", bh->b_blocknr);*/
+    return;
+  }
+
+/*  if (bh->b_rdev!=bh->b_dev)
+    printk ("Unlocking blk %ld\n", bh->b_blocknr);*/
+  wake_up(&bh->b_wait);
 }
 
 /*
diff -ru --new-file /usr/src/linux/include/linux/major.h linux/include/linux/major.h
--- /usr/src/linux/include/linux/major.h	Tue Apr 11 07:03:29 1995
+++ linux/include/linux/major.h	Fri Jun 30 18:50:21 1995
@@ -26,7 +26,7 @@
  *  6 - lp
  *  7 - /dev/vcs*
  *  8 -                        scsi disk
- *  9 - scsi tape
+ *  9 - scsi tape              multiple devices driver
  * 10 - mice
  * 11 -                        scsi cdrom
  * 12 - qic02 tape
@@ -60,6 +60,7 @@
 #define VCS_MAJOR	7
 #define SCSI_DISK_MAJOR	8
 #define SCSI_TAPE_MAJOR	9
+#define MD_MAJOR        9
 #define MOUSE_MAJOR	10
 #define SCSI_CDROM_MAJOR 11
 #define QIC02_TAPE_MAJOR 12
diff -ru --new-file /usr/src/linux/include/linux/md.h linux/include/linux/md.h
--- /usr/src/linux/include/linux/md.h
+++ linux/include/linux/md.h	Fri Jun 30 18:50:21 1995
@@ -0,0 +1,132 @@
+
+/*
+   md.h : Multiple Devices driver for Linux
+          Copyright (C) 1994, 1995 Marc ZYNGIER
+	  <zyngier@amertume.ufr-info-p7.ibp.fr> or
+	  <maz@gloups.fdn.fr>
+	  
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2, or (at your option)
+   any later version.
+   
+   You should have received a copy of the GNU General Public License
+   (for example /usr/src/linux/COPYING); if not, write to the Free
+   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.  
+*/
+
+#ifndef _MD_H
+#define _MD_H
+
+#include <sys/types.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/mm.h>
+#include <linux/blkdev.h>
+
+#define MD_VERSION "v0.27"
+
+/* ioctls */
+#define REGISTER_DEV (MD_MAJOR<<8 | 1)
+#define START_MD     (MD_MAJOR<<8 | 2)
+#define STOP_MD      (MD_MAJOR<<8 | 3)
+#define MD_INVALID   (MD_MAJOR<<8 | 4)
+#define MD_VALID     (MD_MAJOR<<8 | 5)
+
+/*
+   personalities :
+   Byte 0 : Chunk size factor
+   Byte 1 : Fault tolerance count for each physical device
+            (   0 means no fault tolerance,
+             0xFF means always tolerate faults)
+   Byte 2 : Personality
+   Byte 3 : Reserved.
+ */
+
+#define FAULT_SHIFT       8
+#define PERSONALITY_SHIFT 16
+
+#define FACTOR_MASK       0xFFUL
+#define FAULT_MASK        0xFF00UL
+#define PERSONALITY_MASK  0xFF0000UL
+
+#define MD_RESERVED       0	/* Not used for now */
+#define LINEAR            (1UL << PERSONALITY_SHIFT)
+#define STRIPED           (2UL << PERSONALITY_SHIFT)
+#define STRIPPED          STRIPED /* Long lasting spelling mistake... */
+#define RAID0             STRIPED
+#define RAID1             (3UL << PERSONALITY_SHIFT)
+#define RAID5             (4UL << PERSONALITY_SHIFT)
+#define MAX_PERSONALITY   5
+
+#ifdef __KERNEL__
+
+#define MAX_REAL     8
+#define MAX_MD_DEV   4
+
+#define FACTOR(a)         ((a)->repartition & FACTOR_MASK)
+#define MAX_FAULT(a)      (((a)->repartition & FAULT_MASK)>>8)
+#define PERSONALITY(a)    ((a)->repartition & PERSONALITY_MASK)
+
+#define FACTOR_SHIFT(a) (PAGE_SHIFT + (a) - 10)
+
+/* Invalidation modes */
+#define VALID          0
+#define INVALID_NEXT   1
+#define INVALID_ALWAYS 2
+#define INVALID        3	/* Only useful to md_valid_device */
+
+struct real_dev
+{
+  dev_t dev;			/* Device number */
+  int size;			/* Device size (in blocks) */
+  int offset;			/* Real device offset (in blocks) in md dev
+				   (only used in linear mode) */
+  struct inode *inode;		/* Lock inode */
+  int fault_count;		/* Fault counter for invalidation */
+  int invalid;			/* Indicate if the device is disabled :
+				   VALID          - valid
+				   INVALID_NEXT   - disabled for next access
+				   INVALID_ALWAYS - permanently disabled
+				   (for redundancy modes only) */
+};
+
+struct md_dev;
+
+struct md_personality
+{
+  char *name;
+  int (*map)(int minor, struct md_dev *md_dev, struct request *req);
+  int (*run)(int minor, struct md_dev *md_dev);
+  int (*stop)(int minor, struct md_dev *md_dev);
+  int (*status)(char *page, int minor, struct md_dev *md_dev);
+  int (*ioctl)(struct inode *inode, struct file *file,
+	       unsigned int cmd, unsigned long arg);
+  int max_invalid_dev;
+};
+
+struct md_dev
+{
+  struct md_personality *pers;
+  int repartition;
+  int invalid_dev_count;
+  int busy;
+  int nb_dev;
+  void *private;
+};
+
+extern struct real_dev devices[MAX_MD_DEV][MAX_REAL];
+extern struct md_dev md_dev[MAX_MD_DEV];
+extern int md_size[MAX_MD_DEV];
+extern struct request *md_cur_req;
+
+extern int get_md_status (char *page);
+extern char *partition_name (dev_t dev);
+extern int md_valid_device (int minor, dev_t dev, int mode);
+extern int md_can_reemit (int minor);
+
+extern int register_md_personality (int p_num, struct md_personality *p);
+extern int unregister_md_personality (int p_num);
+
+#endif /* __KERNEL__ */
+#endif /* _MD_H */
diff -ru --new-file /usr/src/linux/include/linux/proc_fs.h linux/include/linux/proc_fs.h
--- /usr/src/linux/include/linux/proc_fs.h	Fri Jun 30 20:01:46 1995
+++ linux/include/linux/proc_fs.h	Fri Jun 30 18:50:21 1995
@@ -32,7 +32,10 @@
 	PROC_KSYMS,
 	PROC_DMA,	
 	PROC_IOPORTS,
-	PROC_PROFILE /* whether enabled or not */
+	PROC_PROFILE, /* whether enabled or not */
+#ifdef CONFIG_BLK_DEV_MD
+	PROC_MD
+#endif
 };
 
 enum pid_directory_inos {
diff -ru --new-file /usr/src/linux/include/linux/raid0.h linux/include/linux/raid0.h
--- /usr/src/linux/include/linux/raid0.h
+++ linux/include/linux/raid0.h	Fri Jun 30 18:50:21 1995
@@ -0,0 +1,28 @@
+
+#ifndef _RAID0_H
+#define _RAID0_H
+
+struct strip_zone
+{
+  int zone_offset;		/* Zone offset in md_dev */
+  int dev_offset;		/* Zone offset in real dev */
+  int size;			/* Zone size */
+  int nb_dev;			/* Number of devices attached to the zone */
+  struct real_dev *dev[MAX_REAL]; /* Devices attached to the zone */
+};
+
+struct raid0_hash
+{
+  struct strip_zone *zone0, *zone1;
+};
+
+struct raid0_data
+{
+  struct raid0_hash *hash_table; /* Dynamically allocated */
+  struct strip_zone *strip_zone; /* This one too */
+  int nr_strip_zones;
+  struct strip_zone *smallest;
+  int nr_zones;
+};
+
+#endif
diff -ru --new-file /usr/src/linux/include/linux/raid1.h linux/include/linux/raid1.h
--- /usr/src/linux/include/linux/raid1.h
+++ linux/include/linux/raid1.h	Fri Jun 30 18:50:21 1995
@@ -0,0 +1,41 @@
+
+#ifndef _RAID1_H
+#define _RAID1_H
+
+#define MIN(a,b) (((a)<(b)) ? (a) : (b))
+
+struct pair
+{
+  struct real_dev *dev[2];
+  int size;			/* size=min(dev[0].size, dev[1].size) */
+  int last_access;
+};
+
+struct strip_zone
+{
+  int zone_offset;		/* Zone offset in md_dev */
+  int dev_offset;		/* Zone offset in real dev */
+  int size;			/* Zone size */
+  int nb_dev;			/* Number of devices attached to the zone */
+  struct pair *pair[MAX_REAL/2];/* Device pair attached to the zone */
+};
+
+struct raid1_hash
+{
+  struct strip_zone *zone0, *zone1;
+};
+
+struct raid1_data
+{
+  struct raid1_hash *hash_table; /* Dynamically allocated */
+  struct strip_zone *strip_zone; /* This one too */
+  struct pair pairs[MAX_REAL/2];
+  int nb_pairs;
+  int nr_strip_zones;
+  struct strip_zone *smallest;
+  int nr_zones;
+  struct pair *last_read_pair;	/* Last pair used for a read access */
+  long last_read_block;		/* Last block used for a read access */
+};
+
+#endif
