/* $NetBSD: lvmcache.c,v 1.1.1.1.2.1 2009/05/13 18:52:42 jym Exp $ */
2
3 /*
4 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
5 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
6 *
7 * This file is part of LVM2.
8 *
9 * This copyrighted material is made available to anyone wishing to use,
10 * modify, copy, or redistribute it subject to the terms and conditions
11 * of the GNU Lesser General Public License v.2.1.
12 *
13 * You should have received a copy of the GNU Lesser General Public License
14 * along with this program; if not, write to the Free Software Foundation,
15 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18 #include "lib.h"
19 #include "lvmcache.h"
20 #include "toolcontext.h"
21 #include "dev-cache.h"
22 #include "locking.h"
23 #include "metadata.h"
24 #include "filter.h"
25 #include "memlock.h"
26 #include "str_list.h"
27 #include "format-text.h"
28 #include "format_pool.h"
29 #include "format1.h"
30
/* Cache indexes: PV UUID, VG UUID and VG name -> cached entries. */
static struct dm_hash_table *_pvid_hash = NULL;
static struct dm_hash_table *_vgid_hash = NULL;
static struct dm_hash_table *_vgname_hash = NULL;
/* VG names locked by this process (presence in hash == locked). */
static struct dm_hash_table *_lock_hash = NULL;
/* All cached vginfo structs; orphan VGs are kept at the list tail. */
static struct dm_list _vginfos;
static int _scanning_in_progress = 0;	/* Guard against recursive scans */
static int _has_scanned = 0;		/* Initial full label scan done? */
static int _vgs_locked = 0;		/* Count of non-global VG locks held */
static int _vg_global_lock_held = 0;	/* Global lock held when cache wiped? */
40
41 int lvmcache_init(void)
42 {
43 dm_list_init(&_vginfos);
44
45 if (!(_vgname_hash = dm_hash_create(128)))
46 return 0;
47
48 if (!(_vgid_hash = dm_hash_create(128)))
49 return 0;
50
51 if (!(_pvid_hash = dm_hash_create(128)))
52 return 0;
53
54 if (!(_lock_hash = dm_hash_create(128)))
55 return 0;
56
57 if (_vg_global_lock_held)
58 lvmcache_lock_vgname(VG_GLOBAL, 0);
59
60 return 1;
61 }
62
63 /* Volume Group metadata cache functions */
64 static void _free_cached_vgmetadata(struct lvmcache_vginfo *vginfo)
65 {
66 if (!vginfo || !vginfo->vgmetadata)
67 return;
68
69 dm_free(vginfo->vgmetadata);
70
71 vginfo->vgmetadata = NULL;
72
73 log_debug("Metadata cache: VG %s wiped.", vginfo->vgname);
74 }
75
76 static void _store_metadata(struct lvmcache_vginfo *vginfo,
77 struct volume_group *vg, unsigned precommitted)
78 {
79 int size;
80
81 if (vginfo->vgmetadata)
82 _free_cached_vgmetadata(vginfo);
83
84 if (!(size = export_vg_to_buffer(vg, &vginfo->vgmetadata))) {
85 stack;
86 return;
87 }
88
89 vginfo->precommitted = precommitted;
90
91 log_debug("Metadata cache: VG %s stored (%d bytes%s).", vginfo->vgname,
92 size, precommitted ? ", precommitted" : "");
93 }
94
95 static void _update_cache_info_lock_state(struct lvmcache_info *info,
96 int locked,
97 int *cached_vgmetadata_valid)
98 {
99 int was_locked = (info->status & CACHE_LOCKED) ? 1 : 0;
100
101 /*
102 * Cache becomes invalid whenever lock state changes unless
103 * exclusive VG_GLOBAL is held (i.e. while scanning).
104 */
105 if (!vgname_is_locked(VG_GLOBAL) && (was_locked != locked)) {
106 info->status |= CACHE_INVALID;
107 *cached_vgmetadata_valid = 0;
108 }
109
110 if (locked)
111 info->status |= CACHE_LOCKED;
112 else
113 info->status &= ~CACHE_LOCKED;
114 }
115
116 static void _update_cache_vginfo_lock_state(struct lvmcache_vginfo *vginfo,
117 int locked)
118 {
119 struct lvmcache_info *info;
120 int cached_vgmetadata_valid = 1;
121
122 dm_list_iterate_items(info, &vginfo->infos)
123 _update_cache_info_lock_state(info, locked,
124 &cached_vgmetadata_valid);
125
126 if (!cached_vgmetadata_valid)
127 _free_cached_vgmetadata(vginfo);
128 }
129
130 static void _update_cache_lock_state(const char *vgname, int locked)
131 {
132 struct lvmcache_vginfo *vginfo;
133
134 if (!(vginfo = vginfo_from_vgname(vgname, NULL)))
135 return;
136
137 _update_cache_vginfo_lock_state(vginfo, locked);
138 }
139
140 static void _drop_metadata(const char *vgname)
141 {
142 struct lvmcache_vginfo *vginfo;
143 struct lvmcache_info *info;
144
145 if (!(vginfo = vginfo_from_vgname(vgname, NULL)))
146 return;
147
148 /*
149 * Invalidate cached PV labels.
150 * If cached precommitted metadata exists that means we
151 * already invalidated the PV labels (before caching it)
152 * and we must not do it again.
153 */
154
155 if (!vginfo->precommitted)
156 dm_list_iterate_items(info, &vginfo->infos)
157 info->status |= CACHE_INVALID;
158
159 _free_cached_vgmetadata(vginfo);
160 }
161
/*
 * Drop cached metadata for vgname. VG_ORPHANS expands to every
 * format's orphan VG; ordinary VGs are skipped while VG_GLOBAL is held.
 */
void lvmcache_drop_metadata(const char *vgname)
{
	if (strcmp(vgname, VG_ORPHANS)) {
		if (!vgname_is_locked(VG_GLOBAL))
			_drop_metadata(vgname);
		return;
	}

	/* Invalidate all labels on orphan PVs, across every format. */
	_drop_metadata(FMT_TEXT_ORPHAN_VG_NAME);
	_drop_metadata(FMT_LVM1_ORPHAN_VG_NAME);
	_drop_metadata(FMT_POOL_ORPHAN_VG_NAME);

	/* Indicate that PVs could now be missing from the cache */
	init_full_scan_done(0);
}
175
/*
 * Record that vgname is now locked by this process and mark its cached
 * entries accordingly. read_only is currently unused.
 */
void lvmcache_lock_vgname(const char *vgname, int read_only __attribute((unused)))
{
	/* Lazily initialise the cache on first use. */
	if (!_lock_hash && !lvmcache_init()) {
		log_error("Internal cache initialisation failed");
		return;
	}

	if (dm_hash_lookup(_lock_hash, vgname))
		log_error("Internal error: Nested locking attempted on VG %s.",
			  vgname);

	if (!dm_hash_insert(_lock_hash, vgname, (void *) 1))
		log_error("Cache locking failure for %s", vgname);

	_update_cache_lock_state(vgname, 1);

	/* VG_GLOBAL does not count towards the per-VG lock total. */
	if (strcmp(vgname, VG_GLOBAL))
		_vgs_locked++;
}
195
196 int vgname_is_locked(const char *vgname)
197 {
198 if (!_lock_hash)
199 return 0;
200
201 return dm_hash_lookup(_lock_hash, vgname) ? 1 : 0;
202 }
203
/*
 * Record that vgname is no longer locked, update its cached entries,
 * and close all devices once the last per-VG lock is released.
 */
void lvmcache_unlock_vgname(const char *vgname)
{
	if (!dm_hash_lookup(_lock_hash, vgname))
		log_error("Internal error: Attempt to unlock unlocked VG %s.",
			  vgname);

	_update_cache_lock_state(vgname, 0);

	dm_hash_remove(_lock_hash, vgname);

	/* FIXME Do this per-VG */
	if (strcmp(vgname, VG_GLOBAL) && !--_vgs_locked)
		dev_close_all();
}
218
219 int vgs_locked(void)
220 {
221 return _vgs_locked;
222 }
223
224 static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo,
225 struct lvmcache_info *info)
226 {
227 if (!vginfo)
228 return;
229
230 info->vginfo = vginfo;
231 dm_list_add(&vginfo->infos, &info->list);
232 }
233
234 static void _vginfo_detach_info(struct lvmcache_info *info)
235 {
236 if (!dm_list_empty(&info->list)) {
237 dm_list_del(&info->list);
238 dm_list_init(&info->list);
239 }
240
241 info->vginfo = NULL;
242 }
243
/* If vgid supplied, require a match. */
struct lvmcache_vginfo *vginfo_from_vgname(const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;

	/* No name: fall back to a pure vgid lookup. */
	if (!vgname)
		return vginfo_from_vgid(vgid);

	if (!_vgname_hash)
		return NULL;

	if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
		return NULL;

	/* No vgid requested: the primary entry for this name wins. */
	if (!vgid)
		return vginfo;

	/* Walk the chain of same-named VGs looking for the vgid. */
	for (; vginfo; vginfo = vginfo->next)
		if (!strncmp(vgid, vginfo->vgid, ID_LEN))
			return vginfo;

	return NULL;
}
266
/*
 * Return the metadata format of the named VG after refreshing its
 * cached PV labels, or NULL if the VG is unknown or its identity
 * changed during the refresh (caller should then rescan).
 */
const struct format_type *fmt_from_vgname(const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct label *label;
	struct dm_list *devh, *tmp;
	struct dm_list devs;
	struct device_list *devl;
	char vgid_found[ID_LEN + 1] __attribute((aligned(8)));

	if (!(vginfo = vginfo_from_vgname(vgname, vgid)))
		return NULL;

	/* This function is normally called before reading metadata so
	 * we check cached labels here. Unfortunately vginfo is volatile:
	 * label_read() below can update the cache underneath us, so we
	 * snapshot the device list and vgid before touching any labels.
	 * NOTE(review): if dm_malloc fails mid-loop, list elements
	 * already allocated are leaked - confirm whether that matters. */
	dm_list_init(&devs);
	dm_list_iterate_items(info, &vginfo->infos) {
		if (!(devl = dm_malloc(sizeof(*devl)))) {
			log_error("device_list element allocation failed");
			return NULL;
		}
		devl->dev = info->dev;
		dm_list_add(&devs, &devl->list);
	}

	memcpy(vgid_found, vginfo->vgid, sizeof(vgid_found));

	/* Re-read each label; the return value is deliberately ignored -
	 * the useful side effect is that cache entries get refreshed. */
	dm_list_iterate_safe(devh, tmp, &devs) {
		devl = dm_list_item(devh, struct device_list);
		label_read(devl->dev, &label, UINT64_C(0));
		dm_list_del(&devl->list);
		dm_free(devl);
	}

	/* If vginfo changed, caller needs to rescan */
	if (!(vginfo = vginfo_from_vgname(vgname, vgid_found)) ||
	    strncmp(vginfo->vgid, vgid_found, ID_LEN))
		return NULL;

	return vginfo->fmt;
}
308
/* Look up a vginfo entry by vgid; vgid need not be NUL-terminated. */
struct lvmcache_vginfo *vginfo_from_vgid(const char *vgid)
{
	char id[ID_LEN + 1] __attribute((aligned(8)));

	if (!_vgid_hash || !vgid)
		return NULL;

	/* Build a terminated copy since vgid may not be NUL-terminated. */
	strncpy(id, vgid, ID_LEN);
	id[ID_LEN] = '\0';

	return dm_hash_lookup(_vgid_hash, id);
}
326
327 const char *vgname_from_vgid(struct dm_pool *mem, const char *vgid)
328 {
329 struct lvmcache_vginfo *vginfo;
330 const char *vgname = NULL;
331
332 if ((vginfo = vginfo_from_vgid(vgid)))
333 vgname = vginfo->vgname;
334
335 if (mem && vgname)
336 return dm_pool_strdup(mem, vgname);
337
338 return vgname;
339 }
340
341 static int _info_is_valid(struct lvmcache_info *info)
342 {
343 if (info->status & CACHE_INVALID)
344 return 0;
345
346 /*
347 * The caller must hold the VG lock to manipulate metadata.
348 * In a cluster, remote nodes sometimes read metadata in the
349 * knowledge that the controlling node is holding the lock.
350 * So if the VG appears to be unlocked here, it should be safe
351 * to use the cached value.
352 */
353 if (info->vginfo && !vgname_is_locked(info->vginfo->vgname))
354 return 1;
355
356 if (!(info->status & CACHE_LOCKED))
357 return 0;
358
359 return 1;
360 }
361
362 static int _vginfo_is_valid(struct lvmcache_vginfo *vginfo)
363 {
364 struct lvmcache_info *info;
365
366 /* Invalid if any info is invalid */
367 dm_list_iterate_items(info, &vginfo->infos)
368 if (!_info_is_valid(info))
369 return 0;
370
371 return 1;
372 }
373
374 /* vginfo is invalid if it does not contain at least one valid info */
375 static int _vginfo_is_invalid(struct lvmcache_vginfo *vginfo)
376 {
377 struct lvmcache_info *info;
378
379 dm_list_iterate_items(info, &vginfo->infos)
380 if (_info_is_valid(info))
381 return 0;
382
383 return 1;
384 }
385
/*
 * Look up a PV entry by pvid (need not be NUL-terminated).
 * If valid_only is set, return it only when the cached data is
 * known still to be valid.
 */
struct lvmcache_info *info_from_pvid(const char *pvid, int valid_only)
{
	char id[ID_LEN + 1] __attribute((aligned(8)));
	struct lvmcache_info *info;

	if (!_pvid_hash || !pvid)
		return NULL;

	/* Build a terminated copy for the hash lookup. */
	strncpy(id, pvid, ID_LEN);
	id[ID_LEN] = '\0';

	if (!(info = dm_hash_lookup(_pvid_hash, id)))
		return NULL;

	return (valid_only && !_info_is_valid(info)) ? NULL : info;
}
409
410 static void _rescan_entry(struct lvmcache_info *info)
411 {
412 struct label *label;
413
414 if (info->status & CACHE_INVALID)
415 label_read(info->dev, &label, UINT64_C(0));
416 }
417
418 static int _scan_invalid(void)
419 {
420 dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _rescan_entry);
421
422 return 1;
423 }
424
/*
 * Scan devices for PV labels and populate the cache.
 * full_scan == 0 after an initial scan only refreshes invalidated
 * entries; non-zero forces a rescan of all devices. full_scan == 2
 * additionally passes a flag to dev_iter_create - presumably forcing
 * a complete device re-scan; confirm against dev-cache.h.
 * Returns 1 on success, 0 on failure or if a scan is already running.
 */
int lvmcache_label_scan(struct cmd_context *cmd, int full_scan)
{
	struct label *label;
	struct dev_iter *iter;
	struct device *dev;
	struct format_type *fmt;

	int r = 0;

	/* Avoid recursion when a PVID can't be found! */
	if (_scanning_in_progress)
		return 0;

	_scanning_in_progress = 1;

	/* Lazily initialise the cache on first use. */
	if (!_vgname_hash && !lvmcache_init()) {
		log_error("Internal cache initialisation failed");
		goto out;
	}

	/* Cheap path: only refresh entries already known to be stale. */
	if (_has_scanned && !full_scan) {
		r = _scan_invalid();
		goto out;
	}

	if (!(iter = dev_iter_create(cmd->filter, (full_scan == 2) ? 1 : 0))) {
		log_error("dev_iter creation failed");
		goto out;
	}

	/* Reading each label populates the cache as a side effect;
	 * per-device failures are deliberately ignored here. */
	while ((dev = dev_iter_get(iter)))
		label_read(dev, &label, UINT64_C(0));

	dev_iter_destroy(iter);

	_has_scanned = 1;

	/* Perform any format-specific scanning e.g. text files */
	dm_list_iterate_items(fmt, &cmd->formats) {
		if (fmt->ops->scan && !fmt->ops->scan(fmt))
			goto out;
	}

	r = 1;

      out:
	_scanning_in_progress = 0;

	return r;
}
475
/*
 * Import a volume_group from the cached metadata buffer for vgid.
 * Returns NULL whenever the cache cannot satisfy the request, in
 * which case the caller reads the metadata from disk instead.
 */
struct volume_group *lvmcache_get_vg(const char *vgid, unsigned precommitted)
{
	struct lvmcache_vginfo *vginfo;
	struct volume_group *vg;
	struct format_instance *fid;

	if (!vgid || !(vginfo = vginfo_from_vgid(vgid)) || !vginfo->vgmetadata)
		return NULL;

	if (!_vginfo_is_valid(vginfo))
		return NULL;

	/*
	 * Don't return cached data if either:
	 * (i) precommitted metadata is requested but we don't have it cached
	 *     - caller should read it off disk;
	 * (ii) live metadata is requested but we have precommitted metadata cached
	 *      and no devices are suspended so caller may read it off disk.
	 *
	 * If live metadata is requested but we have precommitted metadata cached
	 * and devices are suspended, we assume this precommitted metadata has
	 * already been preloaded and committed so it's OK to return it as live.
	 * Note that we do not clear the PRECOMMITTED flag.
	 */
	if ((precommitted && !vginfo->precommitted) ||
	    (!precommitted && vginfo->precommitted && !memlock()))
		return NULL;

	if (!(fid = vginfo->fmt->ops->create_instance(vginfo->fmt,
						      vginfo->vgname,
						      vgid, NULL)))
		return_NULL;

	/* A buffer that fails to import or validate is dropped so the
	 * next caller goes to disk rather than hitting it again. */
	if (!(vg = import_vg_from_buffer(vginfo->vgmetadata, fid)) ||
	    !vg_validate(vg)) {
		_free_cached_vgmetadata(vginfo);
		return_NULL;
	}

	log_debug("Using cached %smetadata for VG %s.",
		  vginfo->precommitted ? "pre-committed" : "", vginfo->vgname);

	return vg;
}
520
521 struct dm_list *lvmcache_get_vgids(struct cmd_context *cmd, int full_scan)
522 {
523 struct dm_list *vgids;
524 struct lvmcache_vginfo *vginfo;
525
526 lvmcache_label_scan(cmd, full_scan);
527
528 if (!(vgids = str_list_create(cmd->mem))) {
529 log_error("vgids list allocation failed");
530 return NULL;
531 }
532
533 dm_list_iterate_items(vginfo, &_vginfos) {
534 if (!str_list_add(cmd->mem, vgids,
535 dm_pool_strdup(cmd->mem, vginfo->vgid))) {
536 log_error("strlist allocation failed");
537 return NULL;
538 }
539 }
540
541 return vgids;
542 }
543
544 struct dm_list *lvmcache_get_vgnames(struct cmd_context *cmd, int full_scan)
545 {
546 struct dm_list *vgnames;
547 struct lvmcache_vginfo *vginfo;
548
549 lvmcache_label_scan(cmd, full_scan);
550
551 if (!(vgnames = str_list_create(cmd->mem))) {
552 log_error("vgnames list allocation failed");
553 return NULL;
554 }
555
556 dm_list_iterate_items(vginfo, &_vginfos) {
557 if (!str_list_add(cmd->mem, vgnames,
558 dm_pool_strdup(cmd->mem, vginfo->vgname))) {
559 log_error("strlist allocation failed");
560 return NULL;
561 }
562 }
563
564 return vgnames;
565 }
566
567 struct dm_list *lvmcache_get_pvids(struct cmd_context *cmd, const char *vgname,
568 const char *vgid)
569 {
570 struct dm_list *pvids;
571 struct lvmcache_vginfo *vginfo;
572 struct lvmcache_info *info;
573
574 if (!(pvids = str_list_create(cmd->mem))) {
575 log_error("pvids list allocation failed");
576 return NULL;
577 }
578
579 if (!(vginfo = vginfo_from_vgname(vgname, vgid)))
580 return pvids;
581
582 dm_list_iterate_items(info, &vginfo->infos) {
583 if (!str_list_add(cmd->mem, pvids,
584 dm_pool_strdup(cmd->mem, info->dev->pvid))) {
585 log_error("strlist allocation failed");
586 return NULL;
587 }
588 }
589
590 return pvids;
591 }
592
593 struct device *device_from_pvid(struct cmd_context *cmd, struct id *pvid)
594 {
595 struct label *label;
596 struct lvmcache_info *info;
597
598 /* Already cached ? */
599 if ((info = info_from_pvid((char *) pvid, 0))) {
600 if (label_read(info->dev, &label, UINT64_C(0))) {
601 info = (struct lvmcache_info *) label->info;
602 if (id_equal(pvid, (struct id *) &info->dev->pvid))
603 return info->dev;
604 }
605 }
606
607 lvmcache_label_scan(cmd, 0);
608
609 /* Try again */
610 if ((info = info_from_pvid((char *) pvid, 0))) {
611 if (label_read(info->dev, &label, UINT64_C(0))) {
612 info = (struct lvmcache_info *) label->info;
613 if (id_equal(pvid, (struct id *) &info->dev->pvid))
614 return info->dev;
615 }
616 }
617
618 if (memlock())
619 return NULL;
620
621 lvmcache_label_scan(cmd, 2);
622
623 /* Try again */
624 if ((info = info_from_pvid((char *) pvid, 0))) {
625 if (label_read(info->dev, &label, UINT64_C(0))) {
626 info = (struct lvmcache_info *) label->info;
627 if (id_equal(pvid, (struct id *) &info->dev->pvid))
628 return info->dev;
629 }
630 }
631
632 return NULL;
633 }
634
635 static int _free_vginfo(struct lvmcache_vginfo *vginfo)
636 {
637 struct lvmcache_vginfo *primary_vginfo, *vginfo2;
638 int r = 1;
639
640 _free_cached_vgmetadata(vginfo);
641
642 vginfo2 = primary_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
643
644 if (vginfo == primary_vginfo) {
645 dm_hash_remove(_vgname_hash, vginfo->vgname);
646 if (vginfo->next && !dm_hash_insert(_vgname_hash, vginfo->vgname,
647 vginfo->next)) {
648 log_error("_vgname_hash re-insertion for %s failed",
649 vginfo->vgname);
650 r = 0;
651 }
652 } else do
653 if (vginfo2->next == vginfo) {
654 vginfo2->next = vginfo->next;
655 break;
656 }
657 while ((vginfo2 = primary_vginfo->next));
658
659 if (vginfo->vgname)
660 dm_free(vginfo->vgname);
661
662 if (vginfo->creation_host)
663 dm_free(vginfo->creation_host);
664
665 if (*vginfo->vgid && _vgid_hash &&
666 vginfo_from_vgid(vginfo->vgid) == vginfo)
667 dm_hash_remove(_vgid_hash, vginfo->vgid);
668
669 dm_list_del(&vginfo->list);
670
671 dm_free(vginfo);
672
673 return r;
674 }
675
676 /*
677 * vginfo must be info->vginfo unless info is NULL
678 */
679 static int _drop_vginfo(struct lvmcache_info *info, struct lvmcache_vginfo *vginfo)
680 {
681 if (info)
682 _vginfo_detach_info(info);
683
684 /* vginfo still referenced? */
685 if (!vginfo || is_orphan_vg(vginfo->vgname) ||
686 !dm_list_empty(&vginfo->infos))
687 return 1;
688
689 if (!_free_vginfo(vginfo))
690 return_0;
691
692 return 1;
693 }
694
695 /* Unused
696 void lvmcache_del(struct lvmcache_info *info)
697 {
698 if (info->dev->pvid[0] && _pvid_hash)
699 dm_hash_remove(_pvid_hash, info->dev->pvid);
700
701 _drop_vginfo(info, info->vginfo);
702
703 info->label->labeller->ops->destroy_label(info->label->labeller,
704 info->label);
705 dm_free(info);
706
707 return;
708 } */
709
/*
 * Re-index a PV entry under pvid and record it on the device.
 * Returns 1 on success (or no-op), 0 on hash insertion failure.
 */
static int _lvmcache_update_pvid(struct lvmcache_info *info, const char *pvid)
{
	/* Already indexed under this pvid with a matching device pvid? */
	if (dm_hash_lookup(_pvid_hash, pvid) == info &&
	    !strcmp(info->dev->pvid, pvid))
		return 1;

	/* Drop any stale index entry before re-inserting. */
	if (*info->dev->pvid)
		dm_hash_remove(_pvid_hash, info->dev->pvid);

	strncpy(info->dev->pvid, pvid, sizeof(info->dev->pvid));

	if (!dm_hash_insert(_pvid_hash, pvid, info)) {
		log_error("_lvmcache_update: pvid insertion failed: %s", pvid);
		return 0;
	}

	return 1;
}
728
/*
 * vginfo must be info->vginfo unless info is NULL (orphans)
 *
 * Re-index vginfo under vgid. Returns 1 on success or no-op,
 * 0 on hash insertion failure.
 */
static int _lvmcache_update_vgid(struct lvmcache_info *info,
				 struct lvmcache_vginfo *vginfo,
				 const char *vgid)
{
	/* Nothing to do without both a vginfo and a vgid, or when the
	 * stored vgid already matches. */
	if (!vgid || !vginfo ||
	    !strncmp(vginfo->vgid, vgid, ID_LEN))
		return 1;

	/*
	 * Dead code removed: the original re-tested `vginfo &&` and had
	 * an `if (!vgid)` "clearing VGID" branch below the removal, both
	 * unreachable after the combined guard above.
	 */
	if (*vginfo->vgid)
		dm_hash_remove(_vgid_hash, vginfo->vgid);

	strncpy(vginfo->vgid, vgid, ID_LEN);
	vginfo->vgid[ID_LEN] = '\0';
	if (!dm_hash_insert(_vgid_hash, vginfo->vgid, vginfo)) {
		log_error("_lvmcache_update: vgid hash insertion failed: %s",
			  vginfo->vgid);
		return 0;
	}

	/* info is NULL only for orphan VGs, which skip this message. */
	if (!is_orphan_vg(vginfo->vgname))
		log_debug("lvmcache: %s: setting %s VGID to %s",
			  dev_name(info->dev), vginfo->vgname,
			  vginfo->vgid);

	return 1;
}
762
/*
 * Insert new_vginfo into _vgname_hash, resolving a duplicate VG name.
 * When a same-named VG (primary_vginfo) already exists, the precedence
 * ladder below decides whether new_vginfo becomes the primary hash
 * entry or is appended to the singly-linked ->next chain of losers.
 * Returns 1 on success, 0 on failure.
 */
static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid,
			  uint32_t vgstatus, const char *creation_host,
			  struct lvmcache_vginfo *primary_vginfo)
{
	struct lvmcache_vginfo *last_vginfo = primary_vginfo;
	char uuid_primary[64] __attribute((aligned(8)));
	char uuid_new[64] __attribute((aligned(8)));
	int use_new = 0;

	/* Pre-existing VG takes precedence. Unexported VG takes precedence. */
	if (primary_vginfo) {
		/* Render both uuids for the warning messages below. */
		if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new)))
			return_0;

		if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary,
				     sizeof(uuid_primary)))
			return_0;

		/*
		 * If Primary not exported, new exported => keep
		 * Else Primary exported, new not exported => change
		 * Else Primary has hostname for this machine => keep
		 * Else Primary has no hostname, new has one => change
		 * Else New has hostname for this machine => change
		 * Else Keep primary.
		 */
		if (!(primary_vginfo->status & EXPORTED_VG) &&
		    (vgstatus & EXPORTED_VG))
			log_error("WARNING: Duplicate VG name %s: "
				  "Existing %s takes precedence over "
				  "exported %s", new_vginfo->vgname,
				  uuid_primary, uuid_new);
		else if ((primary_vginfo->status & EXPORTED_VG) &&
			 !(vgstatus & EXPORTED_VG)) {
			log_error("WARNING: Duplicate VG name %s: "
				  "%s takes precedence over exported %s",
				  new_vginfo->vgname, uuid_new,
				  uuid_primary);
			use_new = 1;
		} else if (primary_vginfo->creation_host &&
			   !strcmp(primary_vginfo->creation_host,
				   primary_vginfo->fmt->cmd->hostname))
			log_error("WARNING: Duplicate VG name %s: "
				  "Existing %s (created here) takes precedence "
				  "over %s", new_vginfo->vgname, uuid_primary,
				  uuid_new);
		else if (!primary_vginfo->creation_host && creation_host) {
			log_error("WARNING: Duplicate VG name %s: "
				  "%s (with creation_host) takes precedence over %s",
				  new_vginfo->vgname, uuid_new,
				  uuid_primary);
			use_new = 1;
		} else if (creation_host &&
			   !strcmp(creation_host,
				   primary_vginfo->fmt->cmd->hostname)) {
			log_error("WARNING: Duplicate VG name %s: "
				  "%s (created here) takes precedence over %s",
				  new_vginfo->vgname, uuid_new,
				  uuid_primary);
			use_new = 1;
		}

		/* Primary kept: append the new entry to the chain tail. */
		if (!use_new) {
			while (last_vginfo->next)
				last_vginfo = last_vginfo->next;
			last_vginfo->next = new_vginfo;
			return 1;
		}

		/* New entry wins: displace the old primary from the hash. */
		dm_hash_remove(_vgname_hash, primary_vginfo->vgname);
	}

	if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) {
		log_error("cache_update: vg hash insertion failed: %s",
			  new_vginfo->vgname);
		return 0;
	}

	/* The displaced primary (and its chain) hangs off the new entry. */
	if (primary_vginfo)
		new_vginfo->next = primary_vginfo;

	return 1;
}
846
/*
 * Move a PV entry (info) into the VG named vgname, creating the
 * vginfo entry if necessary. With info == NULL this just registers
 * the (orphan) VG itself. Returns 1 on success, 0 on failure.
 */
static int _lvmcache_update_vgname(struct lvmcache_info *info,
				   const char *vgname, const char *vgid,
				   uint32_t vgstatus, const char *creation_host,
				   const struct format_type *fmt)
{
	struct lvmcache_vginfo *vginfo, *primary_vginfo, *orphan_vginfo;
	struct lvmcache_info *info2, *info3;
	char mdabuf[32];
	// struct lvmcache_vginfo *old_vginfo, *next;

	/* No-op when no name is given or the PV is already in this VG. */
	if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname)))
		return 1;

	/* Remove existing vginfo entry */
	if (info)
		_drop_vginfo(info, info->vginfo);

	/* Get existing vginfo or create new one */
	if (!(vginfo = vginfo_from_vgname(vgname, vgid))) {
		/*** FIXME - vginfo ends up duplicated instead of renamed.
		// Renaming? This lookup fails.
		if ((vginfo = vginfo_from_vgid(vgid))) {
			next = vginfo->next;
			old_vginfo = vginfo_from_vgname(vginfo->vgname, NULL);
			if (old_vginfo == vginfo) {
				dm_hash_remove(_vgname_hash, old_vginfo->vgname);
				if (old_vginfo->next) {
					if (!dm_hash_insert(_vgname_hash, old_vginfo->vgname, old_vginfo->next)) {
						log_error("vg hash re-insertion failed: %s",
							  old_vginfo->vgname);
						return 0;
					}
				}
			} else do {
				if (old_vginfo->next == vginfo) {
					old_vginfo->next = vginfo->next;
					break;
				}
			} while ((old_vginfo = old_vginfo->next));
			vginfo->next = NULL;

			dm_free(vginfo->vgname);
			if (!(vginfo->vgname = dm_strdup(vgname))) {
				log_error("cache vgname alloc failed for %s", vgname);
				return 0;
			}

			// Rename so can assume new name does not already exist
			if (!dm_hash_insert(_vgname_hash, vginfo->vgname, vginfo->next)) {
				log_error("vg hash re-insertion failed: %s",
					  vginfo->vgname);
				return 0;
			}
		} else {
		***/
		if (!(vginfo = dm_malloc(sizeof(*vginfo)))) {
			log_error("lvmcache_update_vgname: list alloc failed");
			return 0;
		}
		memset(vginfo, 0, sizeof(*vginfo));
		if (!(vginfo->vgname = dm_strdup(vgname))) {
			dm_free(vginfo);
			log_error("cache vgname alloc failed for %s", vgname);
			return 0;
		}
		dm_list_init(&vginfo->infos);

		/*
		 * If we're scanning and there's an invalidated entry, remove it.
		 * Otherwise we risk bogus warnings of duplicate VGs.
		 * Each stale PV is moved back to its format's orphan VG.
		 */
		while ((primary_vginfo = vginfo_from_vgname(vgname, NULL)) &&
		       _scanning_in_progress && _vginfo_is_invalid(primary_vginfo))
			dm_list_iterate_items_safe(info2, info3, &primary_vginfo->infos) {
				orphan_vginfo = vginfo_from_vgname(primary_vginfo->fmt->orphan_vg_name, NULL);
				_drop_vginfo(info2, primary_vginfo);
				_vginfo_attach_info(orphan_vginfo, info2);
				if (info2->mdas.n)
					sprintf(mdabuf, " with %u mdas",
						dm_list_size(&info2->mdas));
				else
					mdabuf[0] = '\0';
				log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
					  dev_name(info2->dev),
					  vgname, orphan_vginfo->vgid[0] ? " (" : "",
					  orphan_vginfo->vgid[0] ? orphan_vginfo->vgid : "",
					  orphan_vginfo->vgid[0] ? ")" : "", mdabuf);
			}

		if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host,
				    primary_vginfo)) {
			dm_free(vginfo->vgname);
			dm_free(vginfo);
			return 0;
		}
		/* Ensure orphans appear last on list_iterate */
		if (is_orphan_vg(vgname))
			dm_list_add(&_vginfos, &vginfo->list);
		else
			dm_list_add_h(&_vginfos, &vginfo->list);
		/***
		}
		***/
	}

	if (info)
		_vginfo_attach_info(vginfo, info);
	else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */
		return_0;

	/* Inherit the caller's lock state for the (possibly new) VG. */
	_update_cache_vginfo_lock_state(vginfo, vgname_is_locked(vgname));

	/* FIXME Check consistency of list! */
	vginfo->fmt = fmt;

	if (info) {
		if (info->mdas.n)
			sprintf(mdabuf, " with %u mdas", dm_list_size(&info->mdas));
		else
			mdabuf[0] = '\0';
		log_debug("lvmcache: %s: now in VG %s%s%s%s%s",
			  dev_name(info->dev),
			  vgname, vginfo->vgid[0] ? " (" : "",
			  vginfo->vgid[0] ? vginfo->vgid : "",
			  vginfo->vgid[0] ? ")" : "", mdabuf);
	} else
		log_debug("lvmcache: initialised VG %s", vgname);

	return 1;
}
977
/*
 * Record a VG's status flags and (optionally) its creation host on the
 * cached vginfo of the given PV entry. Returns 1 on success or no-op,
 * 0 on allocation failure.
 */
static int _lvmcache_update_vgstatus(struct lvmcache_info *info, uint32_t vgstatus,
				     const char *creation_host)
{
	struct lvmcache_vginfo *vginfo;

	if (!info || !info->vginfo)
		return 1;

	vginfo = info->vginfo;

	/* Log transitions of the exported flag. */
	if ((vginfo->status & EXPORTED_VG) != (vgstatus & EXPORTED_VG))
		log_debug("lvmcache: %s: VG %s %s exported",
			  dev_name(info->dev), vginfo->vgname,
			  vgstatus & EXPORTED_VG ? "now" : "no longer");

	vginfo->status = vgstatus;

	if (!creation_host)
		return 1;

	/* Creation host unchanged: nothing more to do. */
	if (vginfo->creation_host &&
	    !strcmp(creation_host, vginfo->creation_host))
		return 1;

	if (vginfo->creation_host)
		dm_free(vginfo->creation_host);

	if (!(vginfo->creation_host = dm_strdup(creation_host))) {
		log_error("cache creation host alloc failed for %s",
			  creation_host);
		return 0;
	}

	log_debug("lvmcache: %s: VG %s: Set creation host to %s.",
		  dev_name(info->dev), vginfo->vgname, creation_host);

	return 1;
}
1012
1013 int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
1014 {
1015 if (!_lock_hash && !lvmcache_init()) {
1016 log_error("Internal cache initialisation failed");
1017 return 0;
1018 }
1019
1020 return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
1021 }
1022
/*
 * Update a PV entry's cached VG name, vgid, status flags and creation
 * host in one call. Returns 1 on success (including the deliberate
 * no-op cases), 0 on failure.
 */
int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
				  const char *vgname, const char *vgid,
				  uint32_t vgstatus, const char *creation_host)
{
	if (!vgname && !info->vginfo) {
		log_error("Internal error: NULL vgname handed to cache");
		/* FIXME Remove this */
		vgname = info->fmt->orphan_vg_name;
		vgid = vgname;
	}

	/* If PV without mdas is already in a real VG, don't make it orphan */
	/* NOTE(review): memlock() here presumably indicates suspended
	 * devices, so the cached VG membership is trusted - confirm. */
	if (is_orphan_vg(vgname) && info->vginfo && !dm_list_size(&info->mdas) &&
	    !is_orphan_vg(info->vginfo->vgname) && memlock())
		return 1;

	/* If moving PV from orphan to real VG, always mark it valid */
	if (!is_orphan_vg(vgname))
		info->status &= ~CACHE_INVALID;

	if (!_lvmcache_update_vgname(info, vgname, vgid, vgstatus,
				     creation_host, info->fmt) ||
	    !_lvmcache_update_vgid(info, info->vginfo, vgid) ||
	    !_lvmcache_update_vgstatus(info, vgstatus, creation_host))
		return_0;

	return 1;
}
1051
/*
 * Refresh the cache entry of every PV in vg and, when configured,
 * cache a text export of the VG metadata. Returns 1 on success.
 */
int lvmcache_update_vg(struct volume_group *vg, unsigned precommitted)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct pv_list *pvl;
	char pvid_s[ID_LEN + 1] __attribute((aligned(8)));

	pvid_s[sizeof(pvid_s) - 1] = '\0';

	dm_list_iterate_items(pvl, &vg->pvs) {
		strncpy(pvid_s, (char *) &pvl->pv->id, sizeof(pvid_s) - 1);
		/* FIXME Could pvl->pv->dev->pvid ever be different? */
		info = info_from_pvid(pvid_s, 0);
		if (info &&
		    !lvmcache_update_vgname_and_id(info, vg->name,
						   (char *) &vg->id,
						   vg->status, NULL))
			return_0;
	}

	/* Store text representation of vg to cache, if enabled. */
	if (vg->cmd->current_settings.cache_vgmetadata &&
	    (vginfo = vginfo_from_vgname(vg->name, NULL)))
		_store_metadata(vginfo, vg, precommitted);

	return 1;
}
1078
/*
 * Add (or update) the cache entry for a PV label found on dev.
 * Handles duplicate pvids by preferring md and device-mapper devices
 * over plain ones. Returns the cache entry, or NULL on failure or
 * when the duplicate resolution rejects this device.
 */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
				   struct device *dev,
				   const char *vgname, const char *vgid,
				   uint32_t vgstatus)
{
	struct label *label;
	struct lvmcache_info *existing, *info;
	char pvid_s[ID_LEN + 1] __attribute((aligned(8)));

	if (!_vgname_hash && !lvmcache_init()) {
		log_error("Internal cache initialisation failed");
		return NULL;
	}

	/* Terminated copy of the (possibly unterminated) pvid. */
	strncpy(pvid_s, pvid, sizeof(pvid_s));
	pvid_s[sizeof(pvid_s) - 1] = '\0';

	if (!(existing = info_from_pvid(pvid_s, 0)) &&
	    !(existing = info_from_pvid(dev->pvid, 0))) {
		/* Brand new PV: create a label and cache entry for it. */
		if (!(label = label_create(labeller)))
			return_NULL;
		if (!(info = dm_malloc(sizeof(*info)))) {
			log_error("lvmcache_info allocation failed");
			label_destroy(label);
			return NULL;
		}
		memset(info, 0, sizeof(*info));

		label->info = info;
		info->label = label;
		dm_list_init(&info->list);
		info->dev = dev;
	} else {
		if (existing->dev != dev) {
			/* Is the existing entry a duplicate pvid e.g. md ? */
			/* Prefer md and device-mapper devices; reject the
			 * newcomer when the cached device already wins. */
			if (MAJOR(existing->dev->dev) == md_major() &&
			    MAJOR(dev->dev) != md_major()) {
				log_very_verbose("Ignoring duplicate PV %s on "
						 "%s - using md %s",
						 pvid, dev_name(dev),
						 dev_name(existing->dev));
				return NULL;
			} else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
				   !dm_is_dm_major(MAJOR(dev->dev))) {
				log_very_verbose("Ignoring duplicate PV %s on "
						 "%s - using dm %s",
						 pvid, dev_name(dev),
						 dev_name(existing->dev));
				return NULL;
			} else if (MAJOR(existing->dev->dev) != md_major() &&
				   MAJOR(dev->dev) == md_major())
				log_very_verbose("Duplicate PV %s on %s - "
						 "using md %s", pvid,
						 dev_name(existing->dev),
						 dev_name(dev));
			else if (!dm_is_dm_major(MAJOR(existing->dev->dev)) &&
				 dm_is_dm_major(MAJOR(dev->dev)))
				log_very_verbose("Duplicate PV %s on %s - "
						 "using dm %s", pvid,
						 dev_name(existing->dev),
						 dev_name(dev));
			/* FIXME If both dm, check dependencies */
			//else if (dm_is_dm_major(MAJOR(existing->dev->dev)) &&
			//dm_is_dm_major(MAJOR(dev->dev)))
			//
			else if (!strcmp(pvid_s, existing->dev->pvid))
				log_error("Found duplicate PV %s: using %s not "
					  "%s", pvid, dev_name(dev),
					  dev_name(existing->dev));
		}
		if (strcmp(pvid_s, existing->dev->pvid))
			log_debug("Updating pvid cache to %s (%s) from %s (%s)",
				  pvid_s, dev_name(dev),
				  existing->dev->pvid, dev_name(existing->dev));
		/* Switch over to new preferred device */
		existing->dev = dev;
		info = existing;
		/* Has labeller changed? */
		if (info->label->labeller != labeller) {
			label_destroy(info->label);
			if (!(info->label = label_create(labeller)))
				/* FIXME leaves info without label! */
				return_NULL;
			info->label->info = info;
		}
		label = info->label;
	}

	info->fmt = (const struct format_type *) labeller->private;
	info->status |= CACHE_INVALID;

	/* On failure, undo everything created above for a new entry. */
	if (!_lvmcache_update_pvid(info, pvid_s)) {
		if (!existing) {
			dm_free(info);
			label_destroy(label);
		}
		return NULL;
	}

	if (!lvmcache_update_vgname_and_id(info, vgname, vgid, vgstatus, NULL)) {
		if (!existing) {
			dm_hash_remove(_pvid_hash, pvid_s);
			strcpy(info->dev->pvid, "");
			dm_free(info);
			label_destroy(label);
		}
		return NULL;
	}

	return info;
}
1190
/* Detach a PV entry, clear its device's pvid and release it. */
static void _lvmcache_destroy_entry(struct lvmcache_info *info)
{
	_vginfo_detach_info(info);
	strcpy(info->dev->pvid, "");
	label_destroy(info->label);
	dm_free(info);
}
1198
1199 static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo)
1200 {
1201 struct lvmcache_vginfo *next;
1202
1203 do {
1204 next = vginfo->next;
1205 if (!_free_vginfo(vginfo))
1206 stack;
1207 } while ((vginfo = next));
1208 }
1209
/*
 * Complain about any VG lock still held at cache destruction.
 * VG_GLOBAL may legitimately remain held; remember it so that
 * lvmcache_init() can re-take it.
 */
static void _lvmcache_destroy_lockname(struct dm_hash_node *n)
{
	char *vgname;

	if (!dm_hash_get_data(_lock_hash, n))
		return;

	vgname = dm_hash_get_key(_lock_hash, n);

	if (!strcmp(vgname, VG_GLOBAL))
		_vg_global_lock_held = 1;
	else
		log_error("Internal error: Volume Group %s was not unlocked",
			  dm_hash_get_key(_lock_hash, n));
}
1225
/*
 * Wipe the whole cache: free every entry and destroy all hash tables.
 * If retain_orphans is set, re-create the per-format orphan VG entries
 * afterwards. Whether VG_GLOBAL was still locked is recorded so a
 * subsequent lvmcache_init() can re-take it.
 */
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans)
{
	struct dm_hash_node *n;
	log_verbose("Wiping internal VG cache");

	_has_scanned = 0;

	if (_vgid_hash) {
		dm_hash_destroy(_vgid_hash);
		_vgid_hash = NULL;
	}

	/* PV entries are freed via the pvid hash... */
	if (_pvid_hash) {
		dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry);
		dm_hash_destroy(_pvid_hash);
		_pvid_hash = NULL;
	}

	/* ...and VG entries (whole duplicate-name chains) via the
	 * vgname hash. */
	if (_vgname_hash) {
		dm_hash_iter(_vgname_hash,
			     (dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist);
		dm_hash_destroy(_vgname_hash);
		_vgname_hash = NULL;
	}

	if (_lock_hash) {
		dm_hash_iterate(n, _lock_hash)
			_lvmcache_destroy_lockname(n);
		dm_hash_destroy(_lock_hash);
		_lock_hash = NULL;
	}

	if (!dm_list_empty(&_vginfos))
		log_error("Internal error: _vginfos list should be empty");
	dm_list_init(&_vginfos);

	if (retain_orphans)
		init_lvmcache_orphans(cmd);
}
1265