### RAID2 hook

# r2_am MODULE
# Load kernel module MODULE exactly once: a shell variable named after the
# module acts as an "already probed" flag, so repeated ARRAY lines in
# mdadm.conf do not re-run modprobe.  eval is used for the indirection
# because the initramfs shell may be plain POSIX sh (no bash ${!1}).
r2_am () {
    if eval "[ -z \"\$${1}\" ]" ; then
        modprobe -q "${1}" >/dev/null 2>&1
        eval "${1}=1"
    fi
}

# Early-userspace entry point: load the md modules the configured arrays
# need, then assemble the arrays described in /etc/mdadm.conf.
run_hook () {
    ## 1) discover which modules are actually needed
    fil=/etc/mdadm.conf
    if [ ! -f "${fil}" ] ; then
        # if user didn't supply proper mdadm.conf, create one;
        # raid will be assembled as a partitionable one in this case;
        # user SHOULD provide proper mdadm.conf for their system and
        # rebuild initramfs afterwards.
        echo "CREATE mode=0660 owner=0 group=6 auto=part16 symlinks=yes" >"${fil}"
        mdadm -Es >>"${fil}"
    fi

    # Map each ARRAY line's level= tag onto the module implementing it.
    while read lev ; do
        [ "${lev%% *}" != "ARRAY" ] && continue
        lev="${lev##*level=}"
        lev="${lev%% *}"
        case "${lev}" in
            raid[01] )                r2_am "${lev}" ;;
            raid[456] )               r2_am raid456  ;;  # raid4/5/6 share one module
            linear|multipath|faulty ) r2_am "${lev}" ;;
        esac
    done <"${fil}"

    ## 2) modules ready, now assembly
    # things to watch out for:
    # raid-partitions require access to device, to make linux actually
    # notice existing partitions. Without that, there will be no uevents
    # right after (dis)assembly, and lvm using sysfs will not check them
    # for lvm volumes (unless sysfs_scan = 0). The easiest thing to do
    # that, is just to run mdadm -As twice.
    # Neil knows about it, and according to him, current mdadm (2.6.4)
    # doesn't do mentioned access in one case. It should get fixed in
    # one of the future versions. If udev rules access device later
    # while processing the rules, i.e. through vol_id helper,
    # it would "self" trigger partitions uevents.
    # Either way, it's safer to just do it explicitly.
    mdadm -As >/dev/null 2>&1
    mdadm -As >/dev/null 2>&1
}

### RAID2 install

# mkinitcpio install hook: declare the modules, binaries and files that
# must be copied into the initramfs image.
install () {
    MODULES=" $(checked_modules "drivers/md/*" | grep -v "dm-") "
    BINARIES="/sbin/mdadm"
    FILES=""
    SCRIPT="raid2"
    add_dir "/dev/md"
    add_file "/etc/mdadm.conf"
}

help () {
    # NOTE(review): the original heredoc body was destroyed during
    # extraction (it was overwritten by the unrelated patch fragment kept
    # as a comment below).  This is a minimal reconstruction — TODO:
    # restore the upstream wording.
    cat <<HELPEOF
  This hook loads the md/raid kernel modules required by the arrays
  listed in /etc/mdadm.conf and assembles those arrays during early
  userspace.  Provide a proper /etc/mdadm.conf and rebuild the image.
HELPEOF
}

# NOTE(review): a stray unified-diff fragment (a patch against rc.sysinit)
# was fused onto the end of this file by extraction; its first line was
# also clipped.  It is preserved verbatim below, commented out so the file
# remains valid shell — it is NOT part of this hook and should be moved to
# a proper .patch file.
#
#  -if [ ... "$(grep ^ARRAY /etc/mdadm.conf 2>/dev/null)" ]; then
#  -	# udev won't create these md nodes, so we do it ourselves
#  -	for dev in $(grep ^ARRAY /etc/mdadm.conf | awk '{print $2}'); do
#  -		path=$(echo $dev | sed 's|/[^/]*$||')
#  -		node=$(echo $dev | sed "s|^$path/||")
#  -		minor=$(echo $node | sed 's|^[^0-9]*||')
#  -		[ ! -e $path/$node ] && /bin/mknod $path/$node b 9 $minor
#  -	done
#  -	status "Activating RAID arrays" /sbin/mdadm --assemble --scan
#  +if grep -q '^ARRAY' /etc/mdadm.conf 2>/dev/null ; then
#  +	status "Activating RAID arrays" /sbin/mdadm -As
#   fi
#   # Unmount and free up the old initrd if it exists