/src/sys/dev/raidframe/
rf_disks.c
     59: * rf_disks.c -- code to perform operations on the actual disks
     97: * initialize the disks comprising the array
     99: * We want the spare disks to have regular row,col numbers so that we can
    101: * throughout that the array contains numRow by numCol _non-spare_ disks, so
    113: RF_RaidDisk_t *disks;   (local in function rf_ConfigureDisks)
    126: disks = raidPtr->Disks;
    132: &disks[c], c);
    137: if (disks[c].status == rf_ds_optimal) {
    144: disks[c].status = rf_ds_failed
    243: RF_RaidDisk_t *disks;   (local in function rf_ConfigureSpareDisks)
    362: RF_RaidDisk_t *disks;   (local in function rf_AutoConfigureDisks)
    990: RF_RaidDisk_t *disks;   (local in function rf_add_hot_spare)
    [all...]
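The rf_ConfigureDisks hits outline array bring-up: walk the numCol columns, probe each component, keep the ones that come up rf_ds_optimal, and mark everything else rf_ds_failed. A minimal, self-contained sketch of that loop follows; the types and the probe_component helper are illustrative stand-ins, not RAIDframe's real rf_ConfigureDisk interface.

    typedef enum { rf_ds_optimal, rf_ds_failed } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned numCol; RF_RaidDisk_t *Disks; } RF_Raid_t;

    /* Stand-in for the per-component probe; assume it fills in disk->status. */
    void probe_component(RF_RaidDisk_t *disk, unsigned col);

    int
    configure_disks(RF_Raid_t *raidPtr)
    {
            RF_RaidDisk_t *disks = raidPtr->Disks;
            unsigned c, num_ok = 0;

            for (c = 0; c < raidPtr->numCol; c++) {
                    probe_component(&disks[c], c);
                    if (disks[c].status == rf_ds_optimal)
                            num_ok++;
                    else
                            disks[c].status = rf_ds_failed;
            }
            /* Whether the array is usable with num_ok < numCol is a per-RAID-level
             * policy decision made elsewhere in the driver. */
            return (num_ok == raidPtr->numCol) ? 0 : -1;
    }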
rf_reconstruct.c
    256: RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed);
    268: if (raidPtr->Disks[scol].status == rf_ds_spare) {
    269: spareDiskPtr = &raidPtr->Disks[scol];
    309: raidPtr->Disks[col].partitionSize);
    312: disks, so at this point the parity is known to be
    334: raidPtr->Disks[col].status = rf_ds_failed;
    386: if (raidPtr->Disks[col].status != rf_ds_failed) {
    389: raidPtr->Disks[col].status = rf_ds_failed;
    410: /* RF_ASSERT(raidPtr->Disks[col].status == rf_ds_failed); */
    429: raidPtr->Disks[col].devname)
    [all...]
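The reconstruction hits follow a clear pattern: assert that the target column really is failed, then scan the spare slots for one still in state rf_ds_spare to rebuild onto. A simplified sketch, assuming the stand-in types below approximate the real ones and that spare slots sit at columns numCol .. numCol+numSpare-1:

    #include <assert.h>
    #include <stddef.h>

    typedef enum { rf_ds_optimal, rf_ds_failed, rf_ds_spare, rf_ds_used_spare } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned numCol, numSpare; RF_RaidDisk_t *Disks; } RF_Raid_t;

    /* Find an unused hot spare to reconstruct the failed column 'col' onto;
     * returns NULL when no spare is available. */
    RF_RaidDisk_t *
    find_spare_for(RF_Raid_t *raidPtr, unsigned col)
    {
            unsigned scol;

            assert(raidPtr->Disks[col].status == rf_ds_failed);

            for (scol = raidPtr->numCol;
                 scol < raidPtr->numCol + raidPtr->numSpare; scol++) {
                    if (raidPtr->Disks[scol].status == rf_ds_spare)
                            return &raidPtr->Disks[scol];
            }
            return NULL;    /* no free spare: reconstruction cannot proceed */
    }

A successful search would then flip the chosen spare to rf_ds_used_spare and rebuild onto it, which is the part the later status assignments in these hits belong to.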
rf_paritymap.c
    363: if (RF_DEAD_DISK(raidPtr->Disks[col].status))
    380: if (raidPtr->Disks[raidPtr->numCol+col].status == rf_ds_used_spare) {
    640: if (RF_DEAD_DISK(raidPtr->Disks[col].status))
    709: if (RF_DEAD_DISK(raidPtr->Disks[col].status))
    725: be any spare disks at this point. */
    766: if (RF_DEAD_DISK(raidPtr->Disks[col].status))
    773: if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
    791: if (RF_DEAD_DISK(raidPtr->Disks[col].status))
    803: if (raidPtr->Disks[raidPtr->numCol+col].status != rf_ds_used_spare)
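rf_paritymap.c applies the same filter everywhere it decides which components should hold a copy of the parity map: live data columns qualify, dead ones (RF_DEAD_DISK) are skipped, and a spare slot only qualifies once it is rf_ds_used_spare, i.e. actually standing in for a failed column. A sketch of that predicate; DEAD_DISK below is a simplified stand-in for the real RF_DEAD_DISK macro:

    #include <stdbool.h>

    typedef enum { rf_ds_optimal, rf_ds_failed, rf_ds_spare, rf_ds_used_spare } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned numCol, numSpare; RF_RaidDisk_t *Disks; } RF_Raid_t;

    /* Simplified stand-in for RF_DEAD_DISK(); the real macro covers more states. */
    #define DEAD_DISK(s)    ((s) == rf_ds_failed)

    /* Should component 'idx' (0 .. numCol+numSpare-1) carry a parity-map copy? */
    bool
    component_carries_map(RF_Raid_t *raidPtr, unsigned idx)
    {
            if (idx < raidPtr->numCol)
                    return !DEAD_DISK(raidPtr->Disks[idx].status);
            /* Spare slot: only if it has actually taken over a failed column. */
            return raidPtr->Disks[idx].status == rf_ds_used_spare;
    }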
rf_netbsdkintf.c
    269: * For example: with a stripe width of 64 blocks (32k) and 5 disks,
    278: * disks to participate -- 4 * 32K/disk == 128K again.
    460: const char *devname = r->Disks[col].devname;
    464: dkwedge_get_parent_name(r->Disks[col].dev);
    565: const char *devname = r->Disks[col].devname;
    578: parent = dkwedge_get_parent_name(r->Disks[col].dev);
    874: if (raidPtr->Disks[c].status == rf_ds_optimal) {
    890: if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
    894: if (raidPtr->Disks[j].spareCol == sparecol) {
    934: bdev = bdevsw_lookup(raidPtr->Disks[dumpto].dev)
    [all...]
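The comment around line 269 works through a sizing example: with 512-byte blocks, a 64-block stripe unit is 32 KiB per component, and with 5 components (4 data plus 1 parity in RAID 5) a full stripe carries 4 x 32 KiB = 128 KiB of data. A throwaway check of that arithmetic; the variable names are mine, not the driver's:

    #include <stdio.h>

    int
    main(void)
    {
            const unsigned block_size  = 512;               /* bytes per block */
            const unsigned stripe_unit = 64;                /* blocks per component per stripe */
            const unsigned num_disks   = 5;                 /* total components */
            const unsigned data_disks  = num_disks - 1;     /* RAID 5: one parity column per stripe */

            unsigned per_disk  = stripe_unit * block_size;  /* 32768 bytes = 32 KiB */
            unsigned full_data = data_disks * per_disk;     /* 131072 bytes = 128 KiB */

            printf("per-component %u bytes, full-stripe data %u bytes\n",
                per_disk, full_data);
            return 0;
    }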
rf_driver.c
    444: printf(" %s", raidPtr->Disks[col].devname);
    445: if (RF_DEAD_DISK(raidPtr->Disks[col].status)) {
    729: raidPtr->Disks[col].status = rf_ds_dist_spared;
    752: if (raidPtr->Disks[fcol].status != rf_ds_failed) {
    757: raidPtr->Disks[fcol].status = rf_ds_failed;
    768: raidPtr->Disks[fcol].auto_configured);
    776: raidPtr->Disks[fcol].auto_configured = 0;
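The rf_driver.c hits reduce the fail-a-component path to: if the column is not already failed, flip its status (rf_ds_failed, or rf_ds_dist_spared when the layout does distributed sparing) and clear its auto_configured flag so it is not silently re-adopted later. A hedged sketch; the function name and types are mine:

    #include <stdbool.h>

    typedef enum { rf_ds_optimal, rf_ds_failed, rf_ds_dist_spared } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; bool auto_configured; } RF_RaidDisk_t;
    typedef struct { unsigned numCol; RF_RaidDisk_t *Disks; } RF_Raid_t;

    void
    fail_disk(RF_Raid_t *raidPtr, unsigned fcol, bool distributed_sparing)
    {
            RF_RaidDisk_t *d = &raidPtr->Disks[fcol];

            if (d->status != rf_ds_failed)
                    d->status = distributed_sparing ? rf_ds_dist_spared
                                                    : rf_ds_failed;
            /* A failed component must not be auto-configured back in. */
            d->auto_configured = false;
    }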
rf_compat50.c
    202: rf_disk_to_disk50(&d_cfg->devs[j], &raidPtr->Disks[j]);
    205: rf_disk_to_disk50(&d_cfg->spares[i], &raidPtr->Disks[j]);
rf_parityscan.c
    343: if (raidPtr->Disks[pda->col].status == rf_ds_reconstructing) {
    368: RF_RowCol_t spCol = raidPtr->Disks[pda->col].spareCol;
    375: if (RF_DEAD_DISK(raidPtr->Disks[pda->col].status))
    391: * disks.
    395: * all disks. This is because the new data intended for the failed
rf_decluster.c
    352: RF_ASSERT(raidPtr->Disks[*col].status == rf_ds_reconstructing || raidPtr->Disks[*col].status == rf_ds_dist_spared);
    412: RF_ASSERT(raidPtr->Disks[*col].status == rf_ds_reconstructing || raidPtr->Disks[*col].status == rf_ds_dist_spared);
    430: /* returns an array of ints identifying the disks that comprise the stripe containing the indicated address.
rf_raid.h
    100: /* an exception is Disks[][].status, which requires locking when it is
    104: u_int numCol;   /* number of columns of disks, typically == #
    105: * of disks/rank */
    106: u_int numSpare; /* number of spare disks */
    118: RF_RaidDisk_t *Disks;   /* all information related to physical disks */
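The rf_raid.h hits give the shape of the per-array bookkeeping: numCol regular columns, numSpare spare slots, and a single Disks array covering both, so a spare lives at index numCol + s. A minimal stand-in with only the fields named in the hits; the real RF_Raid_t and RF_RaidDisk_t carry far more state, and Disks[].status needs locking when written, per the comment at line 100:

    typedef enum {
            rf_ds_optimal,          /* component is healthy */
            rf_ds_failed,           /* component has failed */
            rf_ds_spare,            /* unused hot spare */
            rf_ds_used_spare,       /* spare that has taken over a failed column */
            rf_ds_dist_spared,      /* failed, data redirected to distributed spare space */
            rf_ds_reconstructing    /* rebuild in progress */
    } RF_DiskStatus_t;

    typedef struct {
            RF_DiskStatus_t status;
            char            devname[64];    /* size arbitrary in this sketch */
            unsigned        spareCol;       /* for a failed column: which spare replaced it */
    } RF_RaidDisk_t;

    typedef struct {
            unsigned        numCol;         /* columns of disks */
            unsigned        numSpare;       /* spare disks */
            RF_RaidDisk_t  *Disks;          /* numCol + numSpare entries; spares follow the data columns */
    } RF_Raid_t;

    /* Spares are addressed as columns numCol .. numCol + numSpare - 1. */
    RF_RaidDisk_t *
    spare_slot(RF_Raid_t *raidPtr, unsigned s)
    {
            return &raidPtr->Disks[raidPtr->numCol + s];
    }

This indexing convention is what the hits in rf_paritymap.c, rf_diskqueue.c and rf_compat50.c rely on when they index Disks[raidPtr->numCol + col].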
rf_diskqueue.c
    257: raidPtr->Disks[c].dev,
    269: raidPtr->Disks[raidPtr->numCol + r].dev,
    280: * I/Os outstanding on the physical disks when possible.
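rf_diskqueue.c sets up one request queue per component, and the spare slots get queues too (note the Disks[raidPtr->numCol + r] indexing at line 269), so the queue array mirrors the Disks array one for one. A compact sketch under that assumption; the queue structure and its fields are invented for illustration:

    #include <stdlib.h>

    typedef struct { int dev; } RF_RaidDisk_t;                   /* 'dev' stands in for dev_t */
    typedef struct { int dev; int max_outstanding; } RF_DiskQueue_t;
    typedef struct {
            unsigned        numCol, numSpare;
            RF_RaidDisk_t  *Disks;
            RF_DiskQueue_t *Queues;
    } RF_Raid_t;

    int
    configure_queues(RF_Raid_t *raidPtr, int max_outstanding)
    {
            unsigned total = raidPtr->numCol + raidPtr->numSpare;
            unsigned i;

            raidPtr->Queues = calloc(total, sizeof(*raidPtr->Queues));
            if (raidPtr->Queues == NULL)
                    return -1;

            /* One queue per component, spares included (indices numCol .. total-1),
             * so the driver can keep I/Os outstanding on every physical disk. */
            for (i = 0; i < total; i++) {
                    raidPtr->Queues[i].dev = raidPtr->Disks[i].dev;
                    raidPtr->Queues[i].max_outstanding = max_outstanding;
            }
            return 0;
    }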
rf_pqdegdags.c
    250: /* skip over dead disks */
    251: if (RF_DEAD_DISK(raidPtr->Disks[npda.col].status))
    331: /* skip over dead disks */
    332: if (RF_DEAD_DISK(raidPtr->Disks[npda.col].status))
rf_map.c
     89: RF_RaidDisk_t *disks = raidPtr->Disks;   (local in function rf_MapAccess)
    184: rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 0);
    211: rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
    237: rf_ASMCheckStatus(raidPtr, pda_p, asm_p, disks, 1);
    238: rf_ASMCheckStatus(raidPtr, pda_q, asm_p, disks, 1);
    272: RF_RaidDisk_t *disks = raidPtr->Disks;   (local in function rf_MarkFailuresInASMList)
    284: if (RF_DEAD_DISK(disks[pda->col].status)) {
    291: if (pda && RF_DEAD_DISK(disks[pda->col].status))
    [all...]
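rf_MarkFailuresInASMList walks the physical disk addresses (PDAs) of an access stripe map and tags the ones whose column has died, which is what later lets the DAG selection code build a degraded-mode graph for that access. A stripped-down sketch; the PDA list layout here is invented for illustration:

    #include <stdbool.h>
    #include <stddef.h>

    typedef enum { rf_ds_optimal, rf_ds_failed } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned numCol; RF_RaidDisk_t *Disks; } RF_Raid_t;

    #define DEAD_DISK(s)    ((s) == rf_ds_failed)   /* simplified RF_DEAD_DISK */

    /* Hypothetical PDA node: one contiguous range on one column. */
    struct pda {
            unsigned    col;
            bool        failed;     /* set by this pass */
            struct pda *next;
    };

    /* Mark every PDA whose column is dead; return how many were marked. */
    int
    mark_failures(RF_Raid_t *raidPtr, struct pda *head)
    {
            int nfailed = 0;
            struct pda *pda;

            for (pda = head; pda != NULL; pda = pda->next) {
                    if (DEAD_DISK(raidPtr->Disks[pda->col].status)) {
                            pda->failed = true;
                            nfailed++;
                    }
            }
            return nfailed;
    }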
rf_raid5.c
     71: /* the stripe identifier must identify the disks in each stripe, IN
    179: RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n");
    238: failedPDA->col = raidPtr->Disks[fcol].spareCol;
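The RAID 5 degraded path (and, per the rf_raid1.c and rf_paritylogging.c hits further down, the RAID 1 and parity-logging ones) enforces one rule per redundancy group: at most one failed component, and the failed PDA is redirected to the spare column recorded in Disks[fcol].spareCol. A sketch of that redirection with simplified types; the helper name is mine:

    #include <stdio.h>

    typedef enum { rf_ds_optimal, rf_ds_failed } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; unsigned spareCol; } RF_RaidDisk_t;
    typedef struct { unsigned numCol; RF_RaidDisk_t *Disks; } RF_Raid_t;
    typedef struct { unsigned col; } pda_t;     /* hypothetical PDA */

    /* Redirect the (single) failed PDA in a redundancy group to its spare.
     * Returns -1, redirecting nothing, if more than one component failed. */
    int
    redirect_group(RF_Raid_t *raidPtr, pda_t **pdas, unsigned npda)
    {
            int fidx = -1;
            unsigned i;

            for (i = 0; i < npda; i++) {
                    if (raidPtr->Disks[pdas[i]->col].status == rf_ds_failed) {
                            if (fidx != -1) {
                                    fprintf(stderr, "Multiple disks failed in a "
                                        "single group! Aborting I/O operation.\n");
                                    return -1;
                            }
                            fidx = (int)i;
                    }
            }
            if (fidx != -1) {
                    unsigned fcol = pdas[fidx]->col;
                    pdas[fidx]->col = raidPtr->Disks[fcol].spareCol;
            }
            return 0;
    }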
rf_dagutils.c
    1235: * n = number of disks in array
    1249: for (f = 0; ((!RF_DEAD_DISK(raidPtr->Disks[f].status)) && (f < n)); f++)
    1322: RF_RaidDisk_t *disks = raidPtr->Disks;   (local in function rf_SelectMirrorDiskIdle)
    1345: if (RF_DEAD_DISK(disks[colMirror].status)) {
    1348: if (RF_DEAD_DISK(disks[colData].status)) {
    1385: * the data and parity disks are laid out identically.
    1395: RF_RaidDisk_t *disks = raidPtr->Disks;   (local in function rf_SelectMirrorDiskPartition)
    1403: if (RF_DEAD_DISK(disks[colMirror].status))
    [all...]
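rf_SelectMirrorDiskIdle picks which copy of a mirrored pair to read: a dead copy forces the choice, otherwise the less-loaded component wins. A rough approximation of that decision; the queue accounting is simplified and DEAD_DISK stands in for RF_DEAD_DISK:

    typedef enum { rf_ds_optimal, rf_ds_failed } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned outstanding; } RF_DiskQueue_t;   /* queued I/Os per component */
    typedef struct { RF_RaidDisk_t *Disks; RF_DiskQueue_t *Queues; } RF_Raid_t;

    #define DEAD_DISK(s)    ((s) == rf_ds_failed)   /* simplified RF_DEAD_DISK */

    /* Return the column to read from: the data copy or its mirror. */
    unsigned
    select_mirror_copy(RF_Raid_t *raidPtr, unsigned colData, unsigned colMirror)
    {
            if (DEAD_DISK(raidPtr->Disks[colMirror].status))
                    return colData;
            if (DEAD_DISK(raidPtr->Disks[colData].status))
                    return colMirror;

            /* Both alive: prefer the component with fewer queued I/Os. */
            return (raidPtr->Queues[colData].outstanding <=
                    raidPtr->Queues[colMirror].outstanding) ? colData : colMirror;
    }

The partition-based variant, rf_SelectMirrorDiskPartition, splits reads by sector range instead; that only works because, as the comment at line 1385 notes, the data and mirror components are laid out identically.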
rf_evenodd_dagfuncs.c
    272: * This function is used in case where one data disk failed and both redundant disks
    284: * This function is to encode one sector in one of the data disks to the E disk.
    688: 2. determine the column numbers of the two failed disks.
    710: /* skip over dead disks */
    711: if (RF_DEAD_DISK(raidPtr->Disks[npda.col].status))
    724: * disks. */
    883: /* skip over dead disks */
    884: if (RF_DEAD_DISK(raidPtr->Disks[npda.col].status))
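Before the EvenOdd double-degraded functions can pick their decoding equations they have to establish which two columns have died (step 2 in the comment at line 688). The scan itself is simple; a hedged version with stand-in types:

    typedef enum { rf_ds_optimal, rf_ds_failed } RF_DiskStatus_t;
    typedef struct { RF_DiskStatus_t status; } RF_RaidDisk_t;
    typedef struct { unsigned numCol; RF_RaidDisk_t *Disks; } RF_Raid_t;

    /* Find up to two dead columns; returns how many were found.
     * EvenOdd tolerates at most two failures, so a third is unrecoverable. */
    int
    find_failed_pair(RF_Raid_t *raidPtr, unsigned *fcol1, unsigned *fcol2)
    {
            int nfound = 0;
            unsigned c;

            for (c = 0; c < raidPtr->numCol && nfound < 2; c++) {
                    if (raidPtr->Disks[c].status == rf_ds_failed) {
                            if (nfound == 0)
                                    *fcol1 = c;
                            else
                                    *fcol2 = c;
                            nfound++;
                    }
            }
            return nfound;
    }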
rf_raid1.c
     88: /* this implementation of RAID level 1 uses one row of numCol disks
    136: * returns a list of disks for a given redundancy group
    191: RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n");
    219: failedPDA->col = raidPtr->Disks[fcol].spareCol;
rf_dagdegrd.c
    840: /* skip over dead disks */
    841: if (RF_DEAD_DISK(raidPtr->Disks[pda_p->col].status))
    986: /* skip over dead disks */
    987: if (RF_DEAD_DISK(raidPtr->Disks[pda_p->col].status))
rf_paritylogging.c
    105: /* the stripe identifier must identify the disks in each stripe, IN
    777: /* given a logical RAID address, determine the participating disks in
    825: RF_ERRORMSG("Multiple disks failed in a single group! Aborting I/O operation.\n");
    879: failedPDA->col = raidPtr->Disks[fcol].spareCol;
rf_dagdegwr.c
    665: /* skip over dead disks */
    666: if (RF_DEAD_DISK(raidPtr->Disks[pda_p->col].status))
    740: * (not incl. failed disks) Wp = Write P (could be two) Wq = Write Q
/src/usr.sbin/sysinst/
msg.pm.en
     65: message avdisks {Available disks:}
     68: {This is the extended partition manager. All disks, partitions, etc. are
     81: message raid_disks_fmt {Disks}
     97: message raid_disks {Disks in RAID:}
    122: message lvm_disks {Disks in VG}
/src/usr.sbin/sysinst/arch/i386/
msg.md.de
    100: {Diese Festplatte entspricht den folgenden BIOS Disks:
         (German: "This disk corresponds to the following BIOS disks:")