/* CTF dict creation.
2 Copyright (C) 2019-2024 Free Software Foundation, Inc.
3
4 This file is part of libctf.
5
6 libctf is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
14 See the GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; see the file COPYING. If not see
18 <http://www.gnu.org/licenses/>. */
19
20 #include <ctf-impl.h>
21 #include <sys/param.h>
22 #include <string.h>
23 #include <unistd.h>
24
25 #ifndef EOVERFLOW
26 #define EOVERFLOW ERANGE
27 #endif
28
29 #ifndef roundup
30 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
31 #endif
32
33 /* The initial size of a dynamic type's vlen in members. Arbitrary: the bigger
34 this is, the less allocation needs to be done for small structure
35 initialization, and the more memory is wasted for small structures during CTF
36 construction. No effect on generated CTF or ctf_open()ed CTF. */
37 #define INITIAL_VLEN 16
38
39 /* Make sure the ptrtab has enough space for at least one more type.
40
41 We start with 4KiB of ptrtab, enough for a thousand types, then grow it 25%
42 at a time. */
43
44 static int
45 ctf_grow_ptrtab (ctf_dict_t *fp)
46 {
47 size_t new_ptrtab_len = fp->ctf_ptrtab_len;
48
49 /* We allocate one more ptrtab entry than we need, for the initial zero,
50 plus one because the caller will probably allocate a new type.
51
52 Equally, if the ptrtab is small -- perhaps due to ctf_open of a small
53 dict -- boost it by quite a lot at first, so we don't need to keep
54 realloc()ing. */
55
56 if (fp->ctf_ptrtab == NULL || fp->ctf_ptrtab_len < 1024)
57 new_ptrtab_len = 1024;
58 else if ((fp->ctf_typemax + 2) > fp->ctf_ptrtab_len)
59 new_ptrtab_len = fp->ctf_ptrtab_len * 1.25;
60
61 if (new_ptrtab_len != fp->ctf_ptrtab_len)
62 {
63 uint32_t *new_ptrtab;
64
65 if ((new_ptrtab = realloc (fp->ctf_ptrtab,
66 new_ptrtab_len * sizeof (uint32_t))) == NULL)
67 return (ctf_set_errno (fp, ENOMEM));
68
69 fp->ctf_ptrtab = new_ptrtab;
70 memset (fp->ctf_ptrtab + fp->ctf_ptrtab_len, 0,
71 (new_ptrtab_len - fp->ctf_ptrtab_len) * sizeof (uint32_t));
72 fp->ctf_ptrtab_len = new_ptrtab_len;
73 }
74 return 0;
75 }
76
77 /* Make sure a vlen has enough space: expand it otherwise. Unlike the ptrtab,
78 which grows quite slowly, the vlen grows in big jumps because it is quite
79 expensive to expand: the caller has to scan the old vlen for string refs
80 first and remove them, then re-add them afterwards. The initial size is
81 more or less arbitrary. */
82 static int
83 ctf_grow_vlen (ctf_dict_t *fp, ctf_dtdef_t *dtd, size_t vlen)
84 {
85 unsigned char *old = dtd->dtd_vlen;
86
87 if (dtd->dtd_vlen_alloc > vlen)
88 return 0;
89
90 if ((dtd->dtd_vlen = realloc (dtd->dtd_vlen,
91 dtd->dtd_vlen_alloc * 2)) == NULL)
92 {
93 dtd->dtd_vlen = old;
94 return (ctf_set_errno (fp, ENOMEM));
95 }
96 memset (dtd->dtd_vlen + dtd->dtd_vlen_alloc, 0, dtd->dtd_vlen_alloc);
97 dtd->dtd_vlen_alloc *= 2;
98 return 0;
99 }
100
/* To create an empty CTF dict, we just declare a zeroed header and call
   ctf_bufopen() on it.  If ctf_bufopen succeeds, we mark the new dict r/w and
   initialize the dynamic members.  We start assigning type IDs at 1 because
   type ID 0 is used as a sentinel and a not-found indicator.  */

ctf_dict_t *
ctf_create (int *errp)
{
  static const ctf_header_t hdr = { .cth_preamble = { CTF_MAGIC, CTF_VERSION, 0 } };

  ctf_dynhash_t *structs = NULL, *unions = NULL, *enums = NULL, *names = NULL;
  ctf_sect_t cts;
  ctf_dict_t *fp;

  libctf_init_debug();

  /* Allocate replacement name tables up front, before ctf_bufopen, so that
     on allocation failure nothing needs unwinding except these four.  */
  structs = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
				NULL, NULL);
  unions = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
			       NULL, NULL);
  enums = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
			      NULL, NULL);
  names = ctf_dynhash_create (ctf_hash_string, ctf_hash_eq_string,
			      NULL, NULL);
  if (!structs || !unions || !enums || !names)
    {
      ctf_set_open_errno (errp, EAGAIN);
      goto err;
    }

  /* Present the zeroed header to ctf_bufopen as a CTF "section".  */
  cts.cts_name = _CTF_SECTION;
  cts.cts_data = &hdr;
  cts.cts_size = sizeof (hdr);
  cts.cts_entsize = 1;

  if ((fp = ctf_bufopen (&cts, NULL, NULL, errp)) == NULL)
    goto err;

  /* These hashes will have been initialized with a starting size of zero,
     which is surely wrong.  Use ones with slightly larger sizes.  */
  ctf_dynhash_destroy (fp->ctf_structs);
  ctf_dynhash_destroy (fp->ctf_unions);
  ctf_dynhash_destroy (fp->ctf_enums);
  ctf_dynhash_destroy (fp->ctf_names);
  fp->ctf_structs = structs;
  fp->ctf_unions = unions;
  fp->ctf_enums = enums;
  fp->ctf_names = names;
  fp->ctf_dtoldid = 0;
  fp->ctf_snapshot_lu = 0;

  /* Make sure the ptrtab starts out at a reasonable size.  */

  ctf_set_ctl_hashes (fp);
  if (ctf_grow_ptrtab (fp) < 0)
    {
      /* The name tables are now owned by FP: closing it frees them.  */
      ctf_set_open_errno (errp, ctf_errno (fp));
      ctf_dict_close (fp);
      return NULL;
    }

  return fp;

 err:
  /* ctf_dynhash_destroy is a safe no-op on NULL.  */
  ctf_dynhash_destroy (structs);
  ctf_dynhash_destroy (unions);
  ctf_dynhash_destroy (enums);
  ctf_dynhash_destroy (names);
  return NULL;
}
171
172 /* Compatibility: just update the threshold for ctf_discard. */
173 int
174 ctf_update (ctf_dict_t *fp)
175 {
176 fp->ctf_dtoldid = fp->ctf_typemax;
177 return 0;
178 }
179
180 ctf_dynhash_t *
181 ctf_name_table (ctf_dict_t *fp, int kind)
182 {
183 switch (kind)
184 {
185 case CTF_K_STRUCT:
186 return fp->ctf_structs;
187 case CTF_K_UNION:
188 return fp->ctf_unions;
189 case CTF_K_ENUM:
190 return fp->ctf_enums;
191 default:
192 return fp->ctf_names;
193 }
194 }
195
196 int
197 ctf_dtd_insert (ctf_dict_t *fp, ctf_dtdef_t *dtd, int flag, int kind)
198 {
199 const char *name;
200 if (ctf_dynhash_insert (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type,
201 dtd) < 0)
202 return ctf_set_errno (fp, ENOMEM);
203
204 if (flag == CTF_ADD_ROOT && dtd->dtd_data.ctt_name
205 && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL)
206 {
207 if (ctf_dynhash_insert (ctf_name_table (fp, kind),
208 (char *) name, (void *) (uintptr_t)
209 dtd->dtd_type) < 0)
210 {
211 ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t)
212 dtd->dtd_type);
213 return ctf_set_errno (fp, ENOMEM);
214 }
215 }
216 ctf_list_append (&fp->ctf_dtdefs, dtd);
217 return 0;
218 }
219
/* Remove a dynamic type definition from the dict: unhash it, drop every
   string ref held by its vlen region and its name, unlink its name from the
   relevant name table, and free it.  */
void
ctf_dtd_delete (ctf_dict_t *fp, ctf_dtdef_t *dtd)
{
  int kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  size_t vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
  int name_kind = kind;
  const char *name;

  ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);

  /* Release string refs registered for names inside the vlen region:
     member names for structs/unions, enumerator names for enums.  */
  switch (kind)
    {
    case CTF_K_STRUCT:
    case CTF_K_UNION:
      {
	ctf_lmember_t *memb = (ctf_lmember_t *) dtd->dtd_vlen;
	size_t i;

	for (i = 0; i < vlen; i++)
	  ctf_str_remove_ref (fp, ctf_strraw (fp, memb[i].ctlm_name),
			      &memb[i].ctlm_name);
      }
      break;
    case CTF_K_ENUM:
      {
	ctf_enum_t *en = (ctf_enum_t *) dtd->dtd_vlen;
	size_t i;

	for (i = 0; i < vlen; i++)
	  ctf_str_remove_ref (fp, ctf_strraw (fp, en[i].cte_name),
			      &en[i].cte_name);
      }
      break;
    case CTF_K_FORWARD:
      /* A forward's name lives in the name table of the kind it forwards
	 to, which is stored in ctt_type.  */
      name_kind = dtd->dtd_data.ctt_type;
      break;
    }
  free (dtd->dtd_vlen);
  dtd->dtd_vlen_alloc = 0;

  if (dtd->dtd_data.ctt_name
      && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL)
    {
      /* Only root-visible types were interned in the name tables.  */
      if (LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
	ctf_dynhash_remove (ctf_name_table (fp, name_kind), name);
      ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
    }

  ctf_list_delete (&fp->ctf_dtdefs, dtd);
  free (dtd);
}
271
272 ctf_dtdef_t *
273 ctf_dtd_lookup (const ctf_dict_t *fp, ctf_id_t type)
274 {
275 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, type))
276 fp = fp->ctf_parent;
277
278 return (ctf_dtdef_t *)
279 ctf_dynhash_lookup (fp->ctf_dthash, (void *) (uintptr_t) type);
280 }
281
282 ctf_dtdef_t *
283 ctf_dynamic_type (const ctf_dict_t *fp, ctf_id_t id)
284 {
285 ctf_id_t idx;
286
287 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id))
288 fp = fp->ctf_parent;
289
290 idx = LCTF_TYPE_TO_INDEX(fp, id);
291
292 if ((unsigned long) idx <= fp->ctf_typemax)
293 return ctf_dtd_lookup (fp, id);
294 return NULL;
295 }
296
297 static int
298 ctf_static_type (const ctf_dict_t *fp, ctf_id_t id)
299 {
300 ctf_id_t idx;
301
302 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, id))
303 fp = fp->ctf_parent;
304
305 idx = LCTF_TYPE_TO_INDEX(fp, id);
306
307 return ((unsigned long) idx <= fp->ctf_stypes);
308 }
309
310 int
311 ctf_dvd_insert (ctf_dict_t *fp, ctf_dvdef_t *dvd)
312 {
313 if (ctf_dynhash_insert (fp->ctf_dvhash, dvd->dvd_name, dvd) < 0)
314 return ctf_set_errno (fp, ENOMEM);
315 ctf_list_append (&fp->ctf_dvdefs, dvd);
316 return 0;
317 }
318
319 void
320 ctf_dvd_delete (ctf_dict_t *fp, ctf_dvdef_t *dvd)
321 {
322 ctf_dynhash_remove (fp->ctf_dvhash, dvd->dvd_name);
323 free (dvd->dvd_name);
324
325 ctf_list_delete (&fp->ctf_dvdefs, dvd);
326 free (dvd);
327 }
328
329 ctf_dvdef_t *
330 ctf_dvd_lookup (const ctf_dict_t *fp, const char *name)
331 {
332 return (ctf_dvdef_t *) ctf_dynhash_lookup (fp->ctf_dvhash, name);
333 }
334
335 /* Discard all of the dynamic type definitions and variable definitions that
336 have been added to the dict since the last call to ctf_update(). We locate
337 such types by scanning the dtd list and deleting elements that have type IDs
338 greater than ctf_dtoldid, which is set by ctf_update(), above, and by
339 scanning the variable list and deleting elements that have update IDs equal
340 to the current value of the last-update snapshot count (indicating that they
341 were added after the most recent call to ctf_update()). */
342 int
343 ctf_discard (ctf_dict_t *fp)
344 {
345 ctf_snapshot_id_t last_update =
346 { fp->ctf_dtoldid,
347 fp->ctf_snapshot_lu + 1 };
348
349 return (ctf_rollback (fp, last_update));
350 }
351
352 ctf_snapshot_id_t
353 ctf_snapshot (ctf_dict_t *fp)
354 {
355 ctf_snapshot_id_t snapid;
356 snapid.dtd_id = fp->ctf_typemax;
357 snapid.snapshot_id = fp->ctf_snapshots++;
358 return snapid;
359 }
360
/* Like ctf_discard(), only discards everything after a particular ID.  */
int
ctf_rollback (ctf_dict_t *fp, ctf_snapshot_id_t id)
{
  ctf_dtdef_t *dtd, *ntd;
  ctf_dvdef_t *dvd, *nvd;

  /* Cannot roll back into the static (read-only) region.  */
  if (id.snapshot_id < fp->ctf_stypes)
    return (ctf_set_errno (fp, ECTF_RDONLY));

  /* Cannot roll back past the most recent ctf_update().  */
  if (fp->ctf_snapshot_lu >= id.snapshot_id)
    return (ctf_set_errno (fp, ECTF_OVERROLLBACK));

  /* Delete every dynamic type whose index is above the snapshot point,
     unlinking root-visible names from the name tables first.  */
  for (dtd = ctf_list_next (&fp->ctf_dtdefs); dtd != NULL; dtd = ntd)
    {
      int kind;
      const char *name;

      /* Grab the successor now: ctf_dtd_delete frees DTD.  */
      ntd = ctf_list_next (dtd);

      if (LCTF_TYPE_TO_INDEX (fp, dtd->dtd_type) <= id.dtd_id)
	continue;

      kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
      if (kind == CTF_K_FORWARD)
	kind = dtd->dtd_data.ctt_type;  /* Forwarded-to kind's name table.  */

      if (dtd->dtd_data.ctt_name
	  && (name = ctf_strraw (fp, dtd->dtd_data.ctt_name)) != NULL
	  && LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info))
	{
	  ctf_dynhash_remove (ctf_name_table (fp, kind), name);
	  ctf_str_remove_ref (fp, name, &dtd->dtd_data.ctt_name);
	}

      ctf_dynhash_remove (fp->ctf_dthash, (void *) (uintptr_t) dtd->dtd_type);
      ctf_dtd_delete (fp, dtd);
    }

  /* Delete every variable added after the snapshot was taken.  */
  for (dvd = ctf_list_next (&fp->ctf_dvdefs); dvd != NULL; dvd = nvd)
    {
      nvd = ctf_list_next (dvd);

      if (dvd->dvd_snapshots <= id.snapshot_id)
	continue;

      ctf_dvd_delete (fp, dvd);
    }

  fp->ctf_typemax = id.dtd_id;
  fp->ctf_snapshots = id.snapshot_id;

  return 0;
}
415
/* Note: vlen is the amount of space *allocated* for the vlen.  It may well not
   be the amount of space used (yet): the space used is declared in per-kind
   fashion in the dtd_data's info word.  */
static ctf_id_t
ctf_add_generic (ctf_dict_t *fp, uint32_t flag, const char *name, int kind,
		 size_t vlen, ctf_dtdef_t **rp)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;

  if (flag != CTF_ADD_NONROOT && flag != CTF_ADD_ROOT)
    return (ctf_set_typed_errno (fp, EINVAL));

  /* Refuse to add a type once the ID space (global or parent-range) would
     overflow.  */
  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) >= CTF_MAX_TYPE)
    return (ctf_set_typed_errno (fp, ECTF_FULL));

  if (LCTF_INDEX_TO_TYPE (fp, fp->ctf_typemax, 1) == (CTF_MAX_PTYPE - 1))
    return (ctf_set_typed_errno (fp, ECTF_FULL));

  /* Prohibit addition of a root-visible type that is already present
     in the non-dynamic portion.  */

  if (flag == CTF_ADD_ROOT && name != NULL && name[0] != '\0')
    {
      ctf_id_t existing;

      if (((existing = ctf_dynhash_lookup_type (ctf_name_table (fp, kind),
						name)) > 0)
	  && ctf_static_type (fp, existing))
	return (ctf_set_typed_errno (fp, ECTF_RDONLY));
    }

  /* Make sure ptrtab always grows to be big enough for all types.  */
  if (ctf_grow_ptrtab (fp) < 0)
    return CTF_ERR;			/* errno is set for us.  */

  if ((dtd = calloc (1, sizeof (ctf_dtdef_t))) == NULL)
    return (ctf_set_typed_errno (fp, EAGAIN));

  dtd->dtd_vlen_alloc = vlen;
  if (vlen > 0)
    {
      if ((dtd->dtd_vlen = calloc (1, vlen)) == NULL)
	goto oom;
    }
  else
    dtd->dtd_vlen = NULL;

  /* Type IDs start at 1; 0 is the sentinel/not-found value.  */
  type = ++fp->ctf_typemax;
  type = LCTF_INDEX_TO_TYPE (fp, type, (fp->ctf_flags & LCTF_CHILD));

  dtd->dtd_data.ctt_name = ctf_str_add_ref (fp, name, &dtd->dtd_data.ctt_name);
  dtd->dtd_type = type;

  /* A zero name offset for a nonempty name means the string add failed.  */
  if (dtd->dtd_data.ctt_name == 0 && name != NULL && name[0] != '\0')
    goto oom;

  if (ctf_dtd_insert (fp, dtd, flag, kind) < 0)
    goto err;				/* errno is set for us.  */

  *rp = dtd;
  return type;

 oom:
  ctf_set_errno (fp, EAGAIN);
 err:
  free (dtd->dtd_vlen);
  free (dtd);
  return CTF_ERR;
}
486
/* When encoding integer sizes, we want to convert a byte count in the range
   1-8 to the closest power of 2 (e.g. 3->4, 5->8, etc).  The bit-smearing
   approach is from clp2() in "Hacker's Delight" by Henry Warren, Jr.,
   generalized here to the full width of size_t: the original fixed shift
   sequence stopped at 16, which truncates results for (admittedly unlikely)
   inputs above 2^32 on 64-bit hosts.  */
static size_t
clp2 (size_t x)
{
  unsigned int shift;

  x--;

  /* Smear the highest set bit into every lower position, doubling the shift
     each time until the whole width of size_t has been covered.  */
  for (shift = 1; shift < sizeof (x) * CHAR_BIT; shift <<= 1)
    x |= (x >> shift);

  return (x + 1);
}
503
504 ctf_id_t
505 ctf_add_encoded (ctf_dict_t *fp, uint32_t flag,
506 const char *name, const ctf_encoding_t *ep, uint32_t kind)
507 {
508 ctf_dtdef_t *dtd;
509 ctf_id_t type;
510 uint32_t encoding;
511
512 if (ep == NULL)
513 return (ctf_set_typed_errno (fp, EINVAL));
514
515 if (name == NULL || name[0] == '\0')
516 return (ctf_set_typed_errno (fp, ECTF_NONAME));
517
518 if (!ctf_assert (fp, kind == CTF_K_INTEGER || kind == CTF_K_FLOAT))
519 return CTF_ERR; /* errno is set for us. */
520
521 if ((type = ctf_add_generic (fp, flag, name, kind, sizeof (uint32_t),
522 &dtd)) == CTF_ERR)
523 return CTF_ERR; /* errno is set for us. */
524
525 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
526 dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
527 / CHAR_BIT);
528 switch (kind)
529 {
530 case CTF_K_INTEGER:
531 encoding = CTF_INT_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
532 break;
533 case CTF_K_FLOAT:
534 encoding = CTF_FP_DATA (ep->cte_format, ep->cte_offset, ep->cte_bits);
535 break;
536 default:
537 /* ctf_assert is opaque with -fno-inline. This dead code avoids
538 a warning about "encoding" being used uninitialized. */
539 return CTF_ERR;
540 }
541 memcpy (dtd->dtd_vlen, &encoding, sizeof (encoding));
542
543 return type;
544 }
545
/* Add an unnamed type of kind KIND referring to type REF (pointer, const,
   volatile, restrict).  REF may be 0, the unimplemented type.  */
ctf_id_t
ctf_add_reftype (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref, uint32_t kind)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  ctf_dict_t *tmp = fp;
  int child = fp->ctf_flags & LCTF_CHILD;

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_typed_errno (fp, EINVAL));

  /* ref == 0 (unimplemented type) is allowed and not looked up.  */
  if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
    return CTF_ERR;			/* errno is set for us.  */

  if ((type = ctf_add_generic (fp, flag, NULL, kind, 0, &dtd)) == CTF_ERR)
    return CTF_ERR;			/* errno is set for us.  */

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, flag, 0);
  dtd->dtd_data.ctt_type = (uint32_t) ref;

  if (kind != CTF_K_POINTER)
    return type;

  /* If we are adding a pointer, update the ptrtab, pointing at this type from
     the type it points to.  Note that ctf_typemax is at this point one higher
     than we want to check against, because it's just been incremented for the
     addition of this type.  The pptrtab is lazily-updated as needed, so is not
     touched here.  */

  uint32_t type_idx = LCTF_TYPE_TO_INDEX (fp, type);
  uint32_t ref_idx = LCTF_TYPE_TO_INDEX (fp, ref);

  /* Only record the pointer when REF lives in the same dict (parent vs
     child) as the new type does.  */
  if (LCTF_TYPE_ISCHILD (fp, ref) == child
      && ref_idx < fp->ctf_typemax)
    fp->ctf_ptrtab[ref_idx] = type_idx;

  return type;
}
584
/* Add a slice: a bitfield view (EP's offset and bits) onto the integral type
   REF.  REF may also be 0, the unimplemented type.  */
ctf_id_t
ctf_add_slice (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref,
	       const ctf_encoding_t *ep)
{
  ctf_dtdef_t *dtd;
  ctf_slice_t slice;
  ctf_id_t resolved_ref = ref;
  ctf_id_t type;
  int kind;
  const ctf_type_t *tp;
  ctf_dict_t *tmp = fp;

  if (ep == NULL)
    return (ctf_set_typed_errno (fp, EINVAL));

  /* Slice geometry is stored in single bytes, so 255 is the ceiling.  */
  if ((ep->cte_bits > 255) || (ep->cte_offset > 255))
    return (ctf_set_typed_errno (fp, ECTF_SLICEOVERFLOW));

  if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
    return (ctf_set_typed_errno (fp, EINVAL));

  if (ref != 0 && ((tp = ctf_lookup_by_id (&tmp, ref)) == NULL))
    return CTF_ERR;			/* errno is set for us.  */

  /* Make sure we ultimately point to an integral type.  We also allow slices to
     point to the unimplemented type, for now, because the compiler can emit
     such slices, though they're not very much use.  */

  resolved_ref = ctf_type_resolve_unsliced (fp, ref);
  kind = ctf_type_kind_unsliced (fp, resolved_ref);

  if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) &&
      (kind != CTF_K_ENUM)
      && (ref != 0))
    return (ctf_set_typed_errno (fp, ECTF_NOTINTFP));

  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_SLICE,
			       sizeof (ctf_slice_t), &dtd)) == CTF_ERR)
    return CTF_ERR;			/* errno is set for us.  */

  memset (&slice, 0, sizeof (ctf_slice_t));

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_SLICE, flag, 0);
  /* Size is the smallest power-of-two byte count that holds cte_bits.  */
  dtd->dtd_data.ctt_size = clp2 (P2ROUNDUP (ep->cte_bits, CHAR_BIT)
				 / CHAR_BIT);
  slice.cts_type = (uint32_t) ref;
  slice.cts_bits = ep->cte_bits;
  slice.cts_offset = ep->cte_offset;
  memcpy (dtd->dtd_vlen, &slice, sizeof (ctf_slice_t));

  return type;
}
637
638 ctf_id_t
639 ctf_add_integer (ctf_dict_t *fp, uint32_t flag,
640 const char *name, const ctf_encoding_t *ep)
641 {
642 return (ctf_add_encoded (fp, flag, name, ep, CTF_K_INTEGER));
643 }
644
645 ctf_id_t
646 ctf_add_float (ctf_dict_t *fp, uint32_t flag,
647 const char *name, const ctf_encoding_t *ep)
648 {
649 return (ctf_add_encoded (fp, flag, name, ep, CTF_K_FLOAT));
650 }
651
652 ctf_id_t
653 ctf_add_pointer (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
654 {
655 return (ctf_add_reftype (fp, flag, ref, CTF_K_POINTER));
656 }
657
658 ctf_id_t
659 ctf_add_array (ctf_dict_t *fp, uint32_t flag, const ctf_arinfo_t *arp)
660 {
661 ctf_dtdef_t *dtd;
662 ctf_array_t cta;
663 ctf_id_t type;
664 ctf_dict_t *tmp = fp;
665
666 if (arp == NULL)
667 return (ctf_set_typed_errno (fp, EINVAL));
668
669 if (arp->ctr_contents != 0
670 && ctf_lookup_by_id (&tmp, arp->ctr_contents) == NULL)
671 return CTF_ERR; /* errno is set for us. */
672
673 tmp = fp;
674 if (ctf_lookup_by_id (&tmp, arp->ctr_index) == NULL)
675 return CTF_ERR; /* errno is set for us. */
676
677 if (ctf_type_kind (fp, arp->ctr_index) == CTF_K_FORWARD)
678 {
679 ctf_err_warn (fp, 1, ECTF_INCOMPLETE,
680 _("ctf_add_array: index type %lx is incomplete"),
681 arp->ctr_contents);
682 return (ctf_set_typed_errno (fp, ECTF_INCOMPLETE));
683 }
684
685 if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_ARRAY,
686 sizeof (ctf_array_t), &dtd)) == CTF_ERR)
687 return CTF_ERR; /* errno is set for us. */
688
689 memset (&cta, 0, sizeof (ctf_array_t));
690
691 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ARRAY, flag, 0);
692 dtd->dtd_data.ctt_size = 0;
693 cta.cta_contents = (uint32_t) arp->ctr_contents;
694 cta.cta_index = (uint32_t) arp->ctr_index;
695 cta.cta_nelems = arp->ctr_nelems;
696 memcpy (dtd->dtd_vlen, &cta, sizeof (ctf_array_t));
697
698 return type;
699 }
700
701 int
702 ctf_set_array (ctf_dict_t *fp, ctf_id_t type, const ctf_arinfo_t *arp)
703 {
704 ctf_dict_t *ofp = fp;
705 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
706 ctf_array_t *vlen;
707
708 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, type))
709 fp = fp->ctf_parent;
710
711 /* You can only call ctf_set_array on a type you have added, not a
712 type that was read in via ctf_open(). */
713 if (type < fp->ctf_stypes)
714 return (ctf_set_errno (ofp, ECTF_RDONLY));
715
716 if (dtd == NULL
717 || LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info) != CTF_K_ARRAY)
718 return (ctf_set_errno (ofp, ECTF_BADID));
719
720 vlen = (ctf_array_t *) dtd->dtd_vlen;
721 vlen->cta_contents = (uint32_t) arp->ctr_contents;
722 vlen->cta_index = (uint32_t) arp->ctr_index;
723 vlen->cta_nelems = arp->ctr_nelems;
724
725 return 0;
726 }
727
/* Add a function type with return type and argument count given by CTC and
   argument types given by ARGV.  A varargs function gets a trailing zero
   argument slot as the in-format varargs marker.  */
ctf_id_t
ctf_add_function (ctf_dict_t *fp, uint32_t flag,
		  const ctf_funcinfo_t *ctc, const ctf_id_t *argv)
{
  ctf_dtdef_t *dtd;
  ctf_id_t type;
  uint32_t vlen;
  uint32_t *vdat;
  ctf_dict_t *tmp = fp;
  size_t initial_vlen;
  size_t i;

  if (ctc == NULL || (ctc->ctc_flags & ~CTF_FUNC_VARARG) != 0
      || (ctc->ctc_argc != 0 && argv == NULL))
    return (ctf_set_typed_errno (fp, EINVAL));

  vlen = ctc->ctc_argc;
  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vlen++;	       /* Add trailing zero to indicate varargs (see below).  */

  /* A zero return type (the unimplemented type) is permitted.  */
  if (ctc->ctc_return != 0
      && ctf_lookup_by_id (&tmp, ctc->ctc_return) == NULL)
    return CTF_ERR;			/* errno is set for us.  */

  if (vlen > CTF_MAX_VLEN)
    return (ctf_set_typed_errno (fp, EOVERFLOW));

  /* One word extra allocated for padding for 4-byte alignment if need be.
     Not reflected in vlen: we don't want to copy anything into it, and
     it's in addition to (e.g.) the trailing 0 indicating varargs.  */

  initial_vlen = (sizeof (uint32_t) * (vlen + (vlen & 1)));
  if ((type = ctf_add_generic (fp, flag, NULL, CTF_K_FUNCTION,
			       initial_vlen, &dtd)) == CTF_ERR)
    return CTF_ERR;			/* errno is set for us.  */

  vdat = (uint32_t *) dtd->dtd_vlen;

  /* Validate and copy each argument type.  NOTE(review): if a lookup fails
     here, the partially-built function type is left in the dict.  */
  for (i = 0; i < ctc->ctc_argc; i++)
    {
      tmp = fp;
      if (argv[i] != 0 && ctf_lookup_by_id (&tmp, argv[i]) == NULL)
	return CTF_ERR;			/* errno is set for us.  */
      vdat[i] = (uint32_t) argv[i];
    }

  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FUNCTION, flag, vlen);
  dtd->dtd_data.ctt_type = (uint32_t) ctc->ctc_return;

  if (ctc->ctc_flags & CTF_FUNC_VARARG)
    vdat[vlen - 1] = 0;	       /* Add trailing zero to indicate varargs.  */

  return type;
}
782
783 ctf_id_t
784 ctf_add_struct_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
785 size_t size)
786 {
787 ctf_dtdef_t *dtd;
788 ctf_id_t type = 0;
789 size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;
790
791 /* Promote root-visible forwards to structs. */
792 if (name != NULL)
793 type = ctf_lookup_by_rawname (fp, CTF_K_STRUCT, name);
794
795 /* Prohibit promotion if this type was ctf_open()ed. */
796 if (type > 0 && type < fp->ctf_stypes)
797 return (ctf_set_errno (fp, ECTF_RDONLY));
798
799 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
800 dtd = ctf_dtd_lookup (fp, type);
801 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_STRUCT,
802 initial_vlen, &dtd)) == CTF_ERR)
803 return CTF_ERR; /* errno is set for us. */
804
805 /* Forwards won't have any vlen yet. */
806 if (dtd->dtd_vlen_alloc == 0)
807 {
808 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
809 return (ctf_set_typed_errno (fp, ENOMEM));
810 dtd->dtd_vlen_alloc = initial_vlen;
811 }
812
813 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_STRUCT, flag, 0);
814 dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
815 dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
816 dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
817
818 return type;
819 }
820
821 ctf_id_t
822 ctf_add_struct (ctf_dict_t *fp, uint32_t flag, const char *name)
823 {
824 return (ctf_add_struct_sized (fp, flag, name, 0));
825 }
826
827 ctf_id_t
828 ctf_add_union_sized (ctf_dict_t *fp, uint32_t flag, const char *name,
829 size_t size)
830 {
831 ctf_dtdef_t *dtd;
832 ctf_id_t type = 0;
833 size_t initial_vlen = sizeof (ctf_lmember_t) * INITIAL_VLEN;
834
835 /* Promote root-visible forwards to unions. */
836 if (name != NULL)
837 type = ctf_lookup_by_rawname (fp, CTF_K_UNION, name);
838
839 /* Prohibit promotion if this type was ctf_open()ed. */
840 if (type > 0 && type < fp->ctf_stypes)
841 return (ctf_set_errno (fp, ECTF_RDONLY));
842
843 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
844 dtd = ctf_dtd_lookup (fp, type);
845 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNION,
846 initial_vlen, &dtd)) == CTF_ERR)
847 return CTF_ERR; /* errno is set for us. */
848
849 /* Forwards won't have any vlen yet. */
850 if (dtd->dtd_vlen_alloc == 0)
851 {
852 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
853 return (ctf_set_typed_errno (fp, ENOMEM));
854 dtd->dtd_vlen_alloc = initial_vlen;
855 }
856
857 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNION, flag, 0);
858 dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
859 dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (size);
860 dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (size);
861
862 return type;
863 }
864
865 ctf_id_t
866 ctf_add_union (ctf_dict_t *fp, uint32_t flag, const char *name)
867 {
868 return (ctf_add_union_sized (fp, flag, name, 0));
869 }
870
871 ctf_id_t
872 ctf_add_enum (ctf_dict_t *fp, uint32_t flag, const char *name)
873 {
874 ctf_dtdef_t *dtd;
875 ctf_id_t type = 0;
876 size_t initial_vlen = sizeof (ctf_enum_t) * INITIAL_VLEN;
877
878 /* Promote root-visible forwards to enums. */
879 if (name != NULL)
880 type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);
881
882 /* Prohibit promotion if this type was ctf_open()ed. */
883 if (type > 0 && type < fp->ctf_stypes)
884 return (ctf_set_errno (fp, ECTF_RDONLY));
885
886 if (type != 0 && ctf_type_kind (fp, type) == CTF_K_FORWARD)
887 dtd = ctf_dtd_lookup (fp, type);
888 else if ((type = ctf_add_generic (fp, flag, name, CTF_K_ENUM,
889 initial_vlen, &dtd)) == CTF_ERR)
890 return CTF_ERR; /* errno is set for us. */
891
892 /* Forwards won't have any vlen yet. */
893 if (dtd->dtd_vlen_alloc == 0)
894 {
895 if ((dtd->dtd_vlen = calloc (1, initial_vlen)) == NULL)
896 return (ctf_set_typed_errno (fp, ENOMEM));
897 dtd->dtd_vlen_alloc = initial_vlen;
898 }
899
900 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_ENUM, flag, 0);
901 dtd->dtd_data.ctt_size = fp->ctf_dmodel->ctd_int;
902
903 return type;
904 }
905
906 ctf_id_t
907 ctf_add_enum_encoded (ctf_dict_t *fp, uint32_t flag, const char *name,
908 const ctf_encoding_t *ep)
909 {
910 ctf_id_t type = 0;
911
912 /* First, create the enum if need be, using most of the same machinery as
913 ctf_add_enum(), to ensure that we do not allow things past that are not
914 enums or forwards to them. (This includes other slices: you cannot slice a
915 slice, which would be a useless thing to do anyway.) */
916
917 if (name != NULL)
918 type = ctf_lookup_by_rawname (fp, CTF_K_ENUM, name);
919
920 if (type != 0)
921 {
922 if ((ctf_type_kind (fp, type) != CTF_K_FORWARD) &&
923 (ctf_type_kind_unsliced (fp, type) != CTF_K_ENUM))
924 return (ctf_set_typed_errno (fp, ECTF_NOTINTFP));
925 }
926 else if ((type = ctf_add_enum (fp, flag, name)) == CTF_ERR)
927 return CTF_ERR; /* errno is set for us. */
928
929 /* Now attach a suitable slice to it. */
930
931 return ctf_add_slice (fp, flag, type, ep);
932 }
933
934 ctf_id_t
935 ctf_add_forward (ctf_dict_t *fp, uint32_t flag, const char *name,
936 uint32_t kind)
937 {
938 ctf_dtdef_t *dtd;
939 ctf_id_t type = 0;
940
941 if (!ctf_forwardable_kind (kind))
942 return (ctf_set_typed_errno (fp, ECTF_NOTSUE));
943
944 if (name == NULL || name[0] == '\0')
945 return (ctf_set_typed_errno (fp, ECTF_NONAME));
946
947 /* If the type is already defined or exists as a forward tag, just return
948 the ctf_id_t of the existing definition. Since this changes nothing,
949 it's safe to do even on the read-only portion of the dict. */
950
951 type = ctf_lookup_by_rawname (fp, kind, name);
952
953 if (type)
954 return type;
955
956 if ((type = ctf_add_generic (fp, flag, name, kind, 0, &dtd)) == CTF_ERR)
957 return CTF_ERR; /* errno is set for us. */
958
959 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_FORWARD, flag, 0);
960 dtd->dtd_data.ctt_type = kind;
961
962 return type;
963 }
964
965 ctf_id_t
966 ctf_add_unknown (ctf_dict_t *fp, uint32_t flag, const char *name)
967 {
968 ctf_dtdef_t *dtd;
969 ctf_id_t type = 0;
970
971 /* If a type is already defined with this name, error (if not CTF_K_UNKNOWN)
972 or just return it. */
973
974 if (name != NULL && name[0] != '\0' && flag == CTF_ADD_ROOT
975 && (type = ctf_lookup_by_rawname (fp, CTF_K_UNKNOWN, name)))
976 {
977 if (ctf_type_kind (fp, type) == CTF_K_UNKNOWN)
978 return type;
979 else
980 {
981 ctf_err_warn (fp, 1, ECTF_CONFLICT,
982 _("ctf_add_unknown: cannot add unknown type "
983 "named %s: type of this name already defined"),
984 name ? name : _("(unnamed type)"));
985 return (ctf_set_typed_errno (fp, ECTF_CONFLICT));
986 }
987 }
988
989 if ((type = ctf_add_generic (fp, flag, name, CTF_K_UNKNOWN, 0, &dtd)) == CTF_ERR)
990 return CTF_ERR; /* errno is set for us. */
991
992 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_UNKNOWN, flag, 0);
993 dtd->dtd_data.ctt_type = 0;
994
995 return type;
996 }
997
998 ctf_id_t
999 ctf_add_typedef (ctf_dict_t *fp, uint32_t flag, const char *name,
1000 ctf_id_t ref)
1001 {
1002 ctf_dtdef_t *dtd;
1003 ctf_id_t type;
1004 ctf_dict_t *tmp = fp;
1005
1006 if (ref == CTF_ERR || ref > CTF_MAX_TYPE)
1007 return (ctf_set_typed_errno (fp, EINVAL));
1008
1009 if (name == NULL || name[0] == '\0')
1010 return (ctf_set_typed_errno (fp, ECTF_NONAME));
1011
1012 if (ref != 0 && ctf_lookup_by_id (&tmp, ref) == NULL)
1013 return CTF_ERR; /* errno is set for us. */
1014
1015 if ((type = ctf_add_generic (fp, flag, name, CTF_K_TYPEDEF, 0,
1016 &dtd)) == CTF_ERR)
1017 return CTF_ERR; /* errno is set for us. */
1018
1019 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (CTF_K_TYPEDEF, flag, 0);
1020 dtd->dtd_data.ctt_type = (uint32_t) ref;
1021
1022 return type;
1023 }
1024
1025 ctf_id_t
1026 ctf_add_volatile (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1027 {
1028 return (ctf_add_reftype (fp, flag, ref, CTF_K_VOLATILE));
1029 }
1030
1031 ctf_id_t
1032 ctf_add_const (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1033 {
1034 return (ctf_add_reftype (fp, flag, ref, CTF_K_CONST));
1035 }
1036
1037 ctf_id_t
1038 ctf_add_restrict (ctf_dict_t *fp, uint32_t flag, ctf_id_t ref)
1039 {
1040 return (ctf_add_reftype (fp, flag, ref, CTF_K_RESTRICT));
1041 }
1042
1043 int
1044 ctf_add_enumerator (ctf_dict_t *fp, ctf_id_t enid, const char *name,
1045 int value)
1046 {
1047 ctf_dict_t *ofp = fp;
1048 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, enid);
1049 unsigned char *old_vlen;
1050 ctf_enum_t *en;
1051
1052 uint32_t kind, vlen, root;
1053
1054 if (name == NULL)
1055 return (ctf_set_errno (fp, EINVAL));
1056
1057 if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, enid))
1058 fp = fp->ctf_parent;
1059
1060 if (enid < fp->ctf_stypes)
1061 return (ctf_set_errno (ofp, ECTF_RDONLY));
1062
1063 if (dtd == NULL)
1064 return (ctf_set_errno (ofp, ECTF_BADID));
1065
1066 kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
1067 root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
1068 vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);
1069
1070 /* Enumeration constant names are only added, and only checked for duplicates,
1071 if the enum they are part of is a root-visible type. */
1072
1073 if (root == CTF_ADD_ROOT && ctf_dynhash_lookup (fp->ctf_names, name))
1074 {
1075 if (fp->ctf_flags & LCTF_STRICT_NO_DUP_ENUMERATORS)
1076 return (ctf_set_errno (ofp, ECTF_DUPLICATE));
1077
1078 if (ctf_track_enumerator (fp, enid, name) < 0)
1079 return (ctf_set_errno (ofp, ctf_errno (fp)));
1080 }
1081
1082 if (kind != CTF_K_ENUM)
1083 return (ctf_set_errno (ofp, ECTF_NOTENUM));
1084
1085 if (vlen == CTF_MAX_VLEN)
1086 return (ctf_set_errno (ofp, ECTF_DTFULL));
1087
1088 old_vlen = dtd->dtd_vlen;
1089
1090 if (ctf_grow_vlen (fp, dtd, sizeof (ctf_enum_t) * (vlen + 1)) < 0)
1091 return -1; /* errno is set for us. */
1092
1093 en = (ctf_enum_t *) dtd->dtd_vlen;
1094
1095 /* Remove refs in the old vlen region and reapply them. */
1096
1097 ctf_str_move_refs (fp, old_vlen, sizeof (ctf_enum_t) * vlen, dtd->dtd_vlen);
1098
1099 /* Check for constant duplication within any given enum: only needed for
1100 non-root-visible types, since the duplicate detection above does the job
1101 for root-visible types just fine. */
1102
1103 if (root == CTF_ADD_NONROOT && (fp->ctf_flags & LCTF_STRICT_NO_DUP_ENUMERATORS))
1104 {
1105 size_t i;
1106
1107 for (i = 0; i < vlen; i++)
1108 if (strcmp (ctf_strptr (fp, en[i].cte_name), name) == 0)
1109 return (ctf_set_errno (ofp, ECTF_DUPLICATE));
1110 }
1111
1112 en[vlen].cte_name = ctf_str_add_movable_ref (fp, name, &en[vlen].cte_name);
1113 en[vlen].cte_value = value;
1114
1115 if (en[vlen].cte_name == 0 && name != NULL && name[0] != '\0')
1116 return (ctf_set_errno (ofp, ctf_errno (fp)));
1117
1118 /* Put the newly-added enumerator name into the name table if this type is
1119 root-visible. */
1120
1121 if (root == CTF_ADD_ROOT)
1122 {
1123 if (ctf_dynhash_insert (fp->ctf_names,
1124 (char *) ctf_strptr (fp, en[vlen].cte_name),
1125 (void *) (uintptr_t) enid) < 0)
1126 return ctf_set_errno (fp, ENOMEM);
1127 }
1128
1129 dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);
1130
1131 return 0;
1132 }
1133
/* Add a member NAME of type TYPE to the struct or union SOUID, at BIT_OFFSET
   bits from the start of the type.  A BIT_OFFSET of (unsigned long) -1 means
   "natural placement": append after the last member, aligned as the new
   member's type requires.  NAME may be NULL or "" (anonymous member).
   Returns 0 on success, or < 0 with errno set on the dict the caller passed
   in (OFP).  */
int
ctf_add_member_offset (ctf_dict_t *fp, ctf_id_t souid, const char *name,
		       ctf_id_t type, unsigned long bit_offset)
{
  ctf_dict_t *ofp = fp;
  ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, souid);

  ssize_t msize, malign, ssize;
  uint32_t kind, vlen, root;
  size_t i;
  int is_incomplete = 0;
  unsigned char *old_vlen;
  ctf_lmember_t *memb;

  if ((fp->ctf_flags & LCTF_CHILD) && LCTF_TYPE_ISPARENT (fp, souid))
    {
      /* Adding a child type to a parent, even via the child, is prohibited.
	 Otherwise, climb to the parent and do all work there.  */

      if (LCTF_TYPE_ISCHILD (fp, type))
	return (ctf_set_errno (ofp, ECTF_BADID));

      fp = fp->ctf_parent;
    }

  /* Static (read-only) types cannot gain members.  */
  if (souid < fp->ctf_stypes)
    return (ctf_set_errno (ofp, ECTF_RDONLY));

  if (dtd == NULL)
    return (ctf_set_errno (ofp, ECTF_BADID));

  /* Normalize "" to NULL: both mean an anonymous member.  */
  if (name != NULL && name[0] == '\0')
    name = NULL;

  kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
  root = LCTF_INFO_ISROOT (fp, dtd->dtd_data.ctt_info);
  vlen = LCTF_INFO_VLEN (fp, dtd->dtd_data.ctt_info);

  if (kind != CTF_K_STRUCT && kind != CTF_K_UNION)
    return (ctf_set_errno (ofp, ECTF_NOTSOU));

  if (vlen == CTF_MAX_VLEN)
    return (ctf_set_errno (ofp, ECTF_DTFULL));

  /* Grow the vlen region to make room for one more member.  The region may
     move, so string refs pointing into it must be re-applied below.  */
  old_vlen = dtd->dtd_vlen;
  if (ctf_grow_vlen (fp, dtd, sizeof (ctf_lmember_t) * (vlen + 1)) < 0)
    return (ctf_set_errno (ofp, ctf_errno (fp)));
  memb = (ctf_lmember_t *) dtd->dtd_vlen;

  /* Remove pending refs in the old vlen region and reapply them.  */

  ctf_str_move_refs (fp, old_vlen, sizeof (ctf_lmember_t) * vlen, dtd->dtd_vlen);

  /* Named members must be unique within this struct/union.  */
  if (name != NULL)
    {
      for (i = 0; i < vlen; i++)
	if (strcmp (ctf_strptr (fp, memb[i].ctlm_name), name) == 0)
	  return (ctf_set_errno (ofp, ECTF_DUPLICATE));
    }

  if ((msize = ctf_type_size (fp, type)) < 0 ||
      (malign = ctf_type_align (fp, type)) < 0)
    {
      /* The unimplemented type, and any type that resolves to it, has no size
	 and no alignment: it can correspond to any number of compiler-inserted
	 types.  We allow incomplete types through since they are routinely
	 added to the ends of structures, and can even be added elsewhere in
	 structures by the deduplicator.  They are assumed to be zero-size with
	 no alignment: this is often wrong, but problems can be avoided in this
	 case by explicitly specifying the size of the structure via the _sized
	 functions.  The deduplicator always does this.  */

      msize = 0;
      malign = 0;
      if (ctf_errno (fp) == ECTF_NONREPRESENTABLE)
	ctf_set_errno (fp, 0);
      else if (ctf_errno (fp) == ECTF_INCOMPLETE)
	is_incomplete = 1;
      else
	return -1;		/* errno is set for us.  */
    }

  memb[vlen].ctlm_name = ctf_str_add_movable_ref (fp, name, &memb[vlen].ctlm_name);
  memb[vlen].ctlm_type = type;
  if (memb[vlen].ctlm_name == 0 && name != NULL && name[0] != '\0')
    return -1;			/* errno is set for us.  */

  if (kind == CTF_K_STRUCT && vlen != 0)
    {
      if (bit_offset == (unsigned long) - 1)
	{
	  /* Natural alignment.  */

	  ctf_id_t ltype = ctf_type_resolve (fp, memb[vlen - 1].ctlm_type);
	  size_t off = CTF_LMEM_OFFSET(&memb[vlen - 1]);

	  ctf_encoding_t linfo;
	  ssize_t lsize;

	  /* Propagate any error from ctf_type_resolve.  If the last member was
	     of unimplemented type, this may be -ECTF_NONREPRESENTABLE: we
	     cannot insert right after such a member without explicit offset
	     specification, because its alignment and size is not known.  */
	  if (ltype == CTF_ERR)
	    return -1;		/* errno is set for us.  */

	  if (is_incomplete)
	    {
	      ctf_err_warn (ofp, 1, ECTF_INCOMPLETE,
			    _("ctf_add_member_offset: cannot add member %s of "
			      "incomplete type %lx to struct %lx without "
			      "specifying explicit offset\n"),
			    name ? name : _("(unnamed member)"), type, souid);
	      return (ctf_set_errno (ofp, ECTF_INCOMPLETE));
	    }

	  /* Advance OFF to the first bit past the previous member: use its
	     bitfield width if it has an encoding, otherwise its byte size.  */
	  if (ctf_type_encoding (fp, ltype, &linfo) == 0)
	    off += linfo.cte_bits;
	  else if ((lsize = ctf_type_size (fp, ltype)) > 0)
	    off += lsize * CHAR_BIT;
	  else if (lsize == -1 && ctf_errno (fp) == ECTF_INCOMPLETE)
	    {
	      const char *lname = ctf_strraw (fp, memb[vlen - 1].ctlm_name);

	      ctf_err_warn (ofp, 1, ECTF_INCOMPLETE,
			    _("ctf_add_member_offset: cannot add member %s of "
			      "type %lx to struct %lx without specifying "
			      "explicit offset after member %s of type %lx, "
			      "which is an incomplete type\n"),
			    name ? name : _("(unnamed member)"), type, souid,
			    lname ? lname : _("(unnamed member)"), ltype);
	      return (ctf_set_errno (ofp, ECTF_INCOMPLETE));
	    }

	  /* Round up the offset of the end of the last member to
	     the next byte boundary, convert 'off' to bytes, and
	     then round it up again to the next multiple of the
	     alignment required by the new member.  Finally,
	     convert back to bits and store the result in
	     dmd_offset.  Technically we could do more efficient
	     packing if the new member is a bit-field, but we're
	     the "compiler" and ANSI says we can do as we choose.  */

	  off = roundup (off, CHAR_BIT) / CHAR_BIT;
	  off = roundup (off, MAX (malign, 1));
	  memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (off * CHAR_BIT);
	  memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (off * CHAR_BIT);
	  ssize = off + msize;
	}
      else
	{
	  /* Specified offset in bits.  */

	  memb[vlen].ctlm_offsethi = CTF_OFFSET_TO_LMEMHI (bit_offset);
	  memb[vlen].ctlm_offsetlo = CTF_OFFSET_TO_LMEMLO (bit_offset);
	  ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
	  ssize = MAX (ssize, ((signed) bit_offset / CHAR_BIT) + msize);
	}
    }
  else
    {
      /* Unions, and the first member of a struct: offset is always zero.  */
      memb[vlen].ctlm_offsethi = 0;
      memb[vlen].ctlm_offsetlo = 0;
      ssize = ctf_get_ctt_size (fp, &dtd->dtd_data, NULL, NULL);
      ssize = MAX (ssize, msize);
    }

  /* Record the (possibly grown) total size and the new member count.  */
  dtd->dtd_data.ctt_size = CTF_LSIZE_SENT;
  dtd->dtd_data.ctt_lsizehi = CTF_SIZE_TO_LSIZE_HI (ssize);
  dtd->dtd_data.ctt_lsizelo = CTF_SIZE_TO_LSIZE_LO (ssize);
  dtd->dtd_data.ctt_info = CTF_TYPE_INFO (kind, root, vlen + 1);

  return 0;
}
1308
1309 int
1310 ctf_add_member_encoded (ctf_dict_t *fp, ctf_id_t souid, const char *name,
1311 ctf_id_t type, unsigned long bit_offset,
1312 const ctf_encoding_t encoding)
1313 {
1314 ctf_dtdef_t *dtd = ctf_dtd_lookup (fp, type);
1315 int kind;
1316 int otype = type;
1317
1318 if (dtd == NULL)
1319 return (ctf_set_errno (fp, ECTF_BADID));
1320
1321 kind = LCTF_INFO_KIND (fp, dtd->dtd_data.ctt_info);
1322
1323 if ((kind != CTF_K_INTEGER) && (kind != CTF_K_FLOAT) && (kind != CTF_K_ENUM))
1324 return (ctf_set_errno (fp, ECTF_NOTINTFP));
1325
1326 if ((type = ctf_add_slice (fp, CTF_ADD_NONROOT, otype, &encoding)) == CTF_ERR)
1327 return -1; /* errno is set for us. */
1328
1329 return ctf_add_member_offset (fp, souid, name, type, bit_offset);
1330 }
1331
1332 int
1333 ctf_add_member (ctf_dict_t *fp, ctf_id_t souid, const char *name,
1334 ctf_id_t type)
1335 {
1336 return ctf_add_member_offset (fp, souid, name, type, (unsigned long) - 1);
1337 }
1338
1339 /* Add a variable regardless of whether or not it is already present.
1340
1341 Internal use only. */
1342 int
1343 ctf_add_variable_forced (ctf_dict_t *fp, const char *name, ctf_id_t ref)
1344 {
1345 ctf_dvdef_t *dvd;
1346 ctf_dict_t *tmp = fp;
1347
1348 if (ctf_lookup_by_id (&tmp, ref) == NULL)
1349 return -1; /* errno is set for us. */
1350
1351 /* Make sure this type is representable. */
1352 if ((ctf_type_resolve (fp, ref) == CTF_ERR)
1353 && (ctf_errno (fp) == ECTF_NONREPRESENTABLE))
1354 return -1;
1355
1356 if ((dvd = malloc (sizeof (ctf_dvdef_t))) == NULL)
1357 return (ctf_set_errno (fp, EAGAIN));
1358
1359 if (name != NULL && (dvd->dvd_name = strdup (name)) == NULL)
1360 {
1361 free (dvd);
1362 return (ctf_set_errno (fp, EAGAIN));
1363 }
1364 dvd->dvd_type = ref;
1365 dvd->dvd_snapshots = fp->ctf_snapshots;
1366
1367 if (ctf_dvd_insert (fp, dvd) < 0)
1368 {
1369 free (dvd->dvd_name);
1370 free (dvd);
1371 return -1; /* errno is set for us. */
1372 }
1373
1374 return 0;
1375 }
1376
1377 int
1378 ctf_add_variable (ctf_dict_t *fp, const char *name, ctf_id_t ref)
1379 {
1380 if (ctf_lookup_variable_here (fp, name) != CTF_ERR)
1381 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1382
1383 if (ctf_errno (fp) != ECTF_NOTYPEDAT)
1384 return -1; /* errno is set for us. */
1385
1386 return ctf_add_variable_forced (fp, name, ref);
1387 }
1388
/* Add a function or object symbol regardless of whether or not it is already
   present (already existing symbols are silently overwritten).

   Associates symbol NAME with type ID in either the function or the object
   hash, per IS_FUNCTION.  Returns 0 on success, or < 0 with errno set on FP.

   Internal use only.  */
int
ctf_add_funcobjt_sym_forced (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id)
{
  ctf_dict_t *tmp = fp;
  char *dupname;
  ctf_dynhash_t *h = is_function ? fp->ctf_funchash : fp->ctf_objthash;

  /* The type must exist (possibly in the parent).  */
  if (ctf_lookup_by_id (&tmp, id) == NULL)
    return -1;			/* errno is set for us.  */

  /* Function symbols must cite a function type.  */
  if (is_function && ctf_type_kind (fp, id) != CTF_K_FUNCTION)
    return (ctf_set_errno (fp, ECTF_NOTFUNC));

  if ((dupname = strdup (name)) == NULL)
    return (ctf_set_errno (fp, ENOMEM));

  /* On success the hash takes ownership of DUPNAME -- presumably it was
     created with a key-freeing destructor that also releases the old key on
     silent overwrite; NOTE(review): confirm against the hash creation
     site.  */
  if (ctf_dynhash_insert (h, dupname, (void *) (uintptr_t) id) < 0)
    {
      free (dupname);
      return (ctf_set_errno (fp, ENOMEM));
    }
  return 0;
}
1416
1417 int
1418 ctf_add_funcobjt_sym (ctf_dict_t *fp, int is_function, const char *name, ctf_id_t id)
1419 {
1420 if (ctf_lookup_by_sym_or_name (fp, 0, name, 0, is_function) != CTF_ERR)
1421 return (ctf_set_errno (fp, ECTF_DUPLICATE));
1422
1423 return ctf_add_funcobjt_sym_forced (fp, is_function, name, id);
1424 }
1425
1426 int
1427 ctf_add_objt_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
1428 {
1429 return (ctf_add_funcobjt_sym (fp, 0, name, id));
1430 }
1431
1432 int
1433 ctf_add_func_sym (ctf_dict_t *fp, const char *name, ctf_id_t id)
1434 {
1435 return (ctf_add_funcobjt_sym (fp, 1, name, id));
1436 }
1437
1438 /* Add an enumeration constant observed in a given enum type as an identifier.
1439 They appear as names that cite the enum type.
1440
1441 Constants that appear in more than one enum, or which are already the names
1442 of types, appear in ctf_conflicting_enums as well.
1443
1444 This is done for all enumeration types at open time, and for newly-added ones
1445 as well: if the strict-enum flag is turned on, this table must be kept up to
1446 date with enums added in the interim. */
1447
1448 int
1449 ctf_track_enumerator (ctf_dict_t *fp, ctf_id_t type, const char *cte_name)
1450 {
1451 int err;
1452
1453 if (ctf_dynhash_lookup_type (fp->ctf_names, cte_name) == 0)
1454 {
1455 uint32_t name = ctf_str_add (fp, cte_name);
1456
1457 if (name == 0)
1458 return -1; /* errno is set for us. */
1459
1460 err = ctf_dynhash_insert_type (fp, fp->ctf_names, type, name);
1461 }
1462 else
1463 {
1464 err = ctf_dynset_insert (fp->ctf_conflicting_enums, (void *)
1465 cte_name);
1466 if (err != 0)
1467 ctf_set_errno (fp, err * -1);
1468 }
1469 if (err != 0)
1470 return -1; /* errno is set for us. */
1471 return 0;
1472 }
1473
/* A (dict, type) pair, plus the dynamic type definition if any: passed as
   the callback argument to the comparison/addition callbacks below.  */
typedef struct ctf_bundle
{
  ctf_dict_t *ctb_dict;		/* CTF dict handle.  */
  ctf_id_t ctb_type;		/* CTF type identifier.  */
  ctf_dtdef_t *ctb_dtd;		/* CTF dynamic type definition (if any).  */
} ctf_bundle_t;
1480
1481 static int
1482 enumcmp (const char *name, int value, void *arg)
1483 {
1484 ctf_bundle_t *ctb = arg;
1485 int bvalue;
1486
1487 if (ctf_enum_value (ctb->ctb_dict, ctb->ctb_type, name, &bvalue) < 0)
1488 {
1489 ctf_err_warn (ctb->ctb_dict, 0, 0,
1490 _("conflict due to enum %s iteration error"), name);
1491 return 1;
1492 }
1493 if (value != bvalue)
1494 {
1495 ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
1496 _("conflict due to enum value change: %i versus %i"),
1497 value, bvalue);
1498 return 1;
1499 }
1500 return 0;
1501 }
1502
1503 static int
1504 enumadd (const char *name, int value, void *arg)
1505 {
1506 ctf_bundle_t *ctb = arg;
1507
1508 return (ctf_add_enumerator (ctb->ctb_dict, ctb->ctb_type,
1509 name, value) < 0);
1510 }
1511
1512 static int
1513 membcmp (const char *name, ctf_id_t type _libctf_unused_, unsigned long offset,
1514 void *arg)
1515 {
1516 ctf_bundle_t *ctb = arg;
1517 ctf_membinfo_t ctm;
1518
1519 /* Don't check nameless members (e.g. anonymous structs/unions) against each
1520 other. */
1521 if (name[0] == 0)
1522 return 0;
1523
1524 if (ctf_member_info (ctb->ctb_dict, ctb->ctb_type, name, &ctm) < 0)
1525 {
1526 ctf_err_warn (ctb->ctb_dict, 0, 0,
1527 _("conflict due to struct member %s iteration error"),
1528 name);
1529 return 1;
1530 }
1531 if (ctm.ctm_offset != offset)
1532 {
1533 ctf_err_warn (ctb->ctb_dict, 1, ECTF_CONFLICT,
1534 _("conflict due to struct member %s offset change: "
1535 "%lx versus %lx"),
1536 name, ctm.ctm_offset, offset);
1537 return 1;
1538 }
1539 return 0;
1540 }
1541
1542 /* Record the correspondence between a source and ctf_add_type()-added
1543 destination type: both types are translated into parent type IDs if need be,
1544 so they relate to the actual dictionary they are in. Outside controlled
1545 circumstances (like linking) it is probably not useful to do more than
1546 compare these pointers, since there is nothing stopping the user closing the
1547 source dict whenever they want to.
1548
1549 Our OOM handling here is just to not do anything, because this is called deep
1550 enough in the call stack that doing anything useful is painfully difficult:
1551 the worst consequence if we do OOM is a bit of type duplication anyway. */
1552
1553 static void
1554 ctf_add_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type,
1555 ctf_dict_t *dst_fp, ctf_id_t dst_type)
1556 {
1557 if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
1558 src_fp = src_fp->ctf_parent;
1559
1560 src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
1561
1562 if (LCTF_TYPE_ISPARENT (dst_fp, dst_type) && dst_fp->ctf_parent)
1563 dst_fp = dst_fp->ctf_parent;
1564
1565 dst_type = LCTF_TYPE_TO_INDEX(dst_fp, dst_type);
1566
1567 if (dst_fp->ctf_link_type_mapping == NULL)
1568 {
1569 ctf_hash_fun f = ctf_hash_type_key;
1570 ctf_hash_eq_fun e = ctf_hash_eq_type_key;
1571
1572 if ((dst_fp->ctf_link_type_mapping = ctf_dynhash_create (f, e, free,
1573 NULL)) == NULL)
1574 return;
1575 }
1576
1577 ctf_link_type_key_t *key;
1578 key = calloc (1, sizeof (struct ctf_link_type_key));
1579 if (!key)
1580 return;
1581
1582 key->cltk_fp = src_fp;
1583 key->cltk_idx = src_type;
1584
1585 /* No OOM checking needed, because if this doesn't work the worst we'll do is
1586 add a few more duplicate types (which will probably run out of memory
1587 anyway). */
1588 ctf_dynhash_insert (dst_fp->ctf_link_type_mapping, key,
1589 (void *) (uintptr_t) dst_type);
1590 }
1591
/* Look up a type mapping: return 0 if none.  The DST_FP is modified to point to
   the parent if need be.  The ID returned is from the dst_fp's perspective.  */
static ctf_id_t
ctf_type_mapping (ctf_dict_t *src_fp, ctf_id_t src_type, ctf_dict_t **dst_fp)
{
  ctf_link_type_key_t key;
  ctf_dict_t *target_fp = *dst_fp;
  ctf_id_t dst_type = 0;

  /* Normalize the source to (owning dict, type index): keys are always
     stored in this form (see ctf_add_type_mapping).  */
  if (LCTF_TYPE_ISPARENT (src_fp, src_type) && src_fp->ctf_parent)
    src_fp = src_fp->ctf_parent;

  src_type = LCTF_TYPE_TO_INDEX(src_fp, src_type);
  key.cltk_fp = src_fp;
  key.cltk_idx = src_type;

  /* First try the mapping on the destination dict itself.  */
  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
					       &key);

  if (dst_type != 0)
    {
      dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
				     target_fp->ctf_parent != NULL);
      *dst_fp = target_fp;
      return dst_type;
    }

  /* Not found: fall back to the destination's parent, if any.  */
  if (target_fp->ctf_parent)
    target_fp = target_fp->ctf_parent;
  else
    return 0;

  if (target_fp->ctf_link_type_mapping)
    dst_type = (uintptr_t) ctf_dynhash_lookup (target_fp->ctf_link_type_mapping,
					       &key);

  if (dst_type)
    dst_type = LCTF_INDEX_TO_TYPE (target_fp, dst_type,
				   target_fp->ctf_parent != NULL);

  /* Either way, *DST_FP now names the dict the returned ID is relative to.  */
  *dst_fp = target_fp;
  return dst_type;
}
1636
1637 /* The ctf_add_type routine is used to copy a type from a source CTF dictionary
1638 to a dynamic destination dictionary. This routine operates recursively by
1639 following the source type's links and embedded member types. If the
1640 destination dict already contains a named type which has the same attributes,
1641 then we succeed and return this type but no changes occur. */
1642 static ctf_id_t
1643 ctf_add_type_internal (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type,
1644 ctf_dict_t *proc_tracking_fp)
1645 {
1646 ctf_id_t dst_type = CTF_ERR;
1647 uint32_t dst_kind = CTF_K_UNKNOWN;
1648 ctf_dict_t *tmp_fp = dst_fp;
1649 ctf_id_t tmp;
1650
1651 const char *name;
1652 uint32_t kind, forward_kind, flag, vlen;
1653
1654 const ctf_type_t *src_tp, *dst_tp;
1655 ctf_bundle_t src, dst;
1656 ctf_encoding_t src_en, dst_en;
1657 ctf_arinfo_t src_ar, dst_ar;
1658
1659 ctf_funcinfo_t ctc;
1660
1661 ctf_id_t orig_src_type = src_type;
1662
1663 if ((src_tp = ctf_lookup_by_id (&src_fp, src_type)) == NULL)
1664 return (ctf_set_typed_errno (dst_fp, ctf_errno (src_fp)));
1665
1666 if ((ctf_type_resolve (src_fp, src_type) == CTF_ERR)
1667 && (ctf_errno (src_fp) == ECTF_NONREPRESENTABLE))
1668 return (ctf_set_typed_errno (dst_fp, ECTF_NONREPRESENTABLE));
1669
1670 name = ctf_strptr (src_fp, src_tp->ctt_name);
1671 kind = LCTF_INFO_KIND (src_fp, src_tp->ctt_info);
1672 flag = LCTF_INFO_ISROOT (src_fp, src_tp->ctt_info);
1673 vlen = LCTF_INFO_VLEN (src_fp, src_tp->ctt_info);
1674
1675 /* If this is a type we are currently in the middle of adding, hand it
1676 straight back. (This lets us handle self-referential structures without
1677 considering forwards and empty structures the same as their completed
1678 forms.) */
1679
1680 tmp = ctf_type_mapping (src_fp, src_type, &tmp_fp);
1681
1682 if (tmp != 0)
1683 {
1684 if (ctf_dynhash_lookup (proc_tracking_fp->ctf_add_processing,
1685 (void *) (uintptr_t) src_type))
1686 return tmp;
1687
1688 /* If this type has already been added from this dictionary, and is the
1689 same kind and (if a struct or union) has the same number of members,
1690 hand it straight back. */
1691
1692 if (ctf_type_kind_unsliced (tmp_fp, tmp) == (int) kind)
1693 {
1694 if (kind == CTF_K_STRUCT || kind == CTF_K_UNION
1695 || kind == CTF_K_ENUM)
1696 {
1697 if ((dst_tp = ctf_lookup_by_id (&tmp_fp, dst_type)) != NULL)
1698 if (vlen == LCTF_INFO_VLEN (tmp_fp, dst_tp->ctt_info))
1699 return tmp;
1700 }
1701 else
1702 return tmp;
1703 }
1704 }
1705
1706 forward_kind = kind;
1707 if (kind == CTF_K_FORWARD)
1708 forward_kind = src_tp->ctt_type;
1709
1710 /* If the source type has a name and is a root type (visible at the top-level
1711 scope), lookup the name in the destination dictionary and verify that it is
1712 of the same kind before we do anything else. */
1713
1714 if ((flag & CTF_ADD_ROOT) && name[0] != '\0'
1715 && (tmp = ctf_lookup_by_rawname (dst_fp, forward_kind, name)) != 0)
1716 {
1717 dst_type = tmp;
1718 dst_kind = ctf_type_kind_unsliced (dst_fp, dst_type);
1719 }
1720
1721 /* If an identically named dst_type exists, fail with ECTF_CONFLICT
1722 unless dst_type is a forward declaration and src_type is a struct,
1723 union, or enum (i.e. the definition of the previous forward decl).
1724
1725 We also allow addition in the opposite order (addition of a forward when a
1726 struct, union, or enum already exists), which is a NOP and returns the
1727 already-present struct, union, or enum. */
1728
1729 if (dst_type != CTF_ERR && dst_kind != kind)
1730 {
1731 if (kind == CTF_K_FORWARD
1732 && (dst_kind == CTF_K_ENUM || dst_kind == CTF_K_STRUCT
1733 || dst_kind == CTF_K_UNION))
1734 {
1735 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1736 return dst_type;
1737 }
1738
1739 if (dst_kind != CTF_K_FORWARD
1740 || (kind != CTF_K_ENUM && kind != CTF_K_STRUCT
1741 && kind != CTF_K_UNION))
1742 {
1743 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1744 _("ctf_add_type: conflict for type %s: "
1745 "kinds differ, new: %i; old (ID %lx): %i"),
1746 name, kind, dst_type, dst_kind);
1747 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
1748 }
1749 }
1750
1751 /* We take special action for an integer, float, or slice since it is
1752 described not only by its name but also its encoding. For integers,
1753 bit-fields exploit this degeneracy. */
1754
1755 if (kind == CTF_K_INTEGER || kind == CTF_K_FLOAT || kind == CTF_K_SLICE)
1756 {
1757 if (ctf_type_encoding (src_fp, src_type, &src_en) != 0)
1758 return (ctf_set_typed_errno (dst_fp, ctf_errno (src_fp)));
1759
1760 if (dst_type != CTF_ERR)
1761 {
1762 ctf_dict_t *fp = dst_fp;
1763
1764 if ((dst_tp = ctf_lookup_by_id (&fp, dst_type)) == NULL)
1765 return CTF_ERR;
1766
1767 if (ctf_type_encoding (dst_fp, dst_type, &dst_en) != 0)
1768 return CTF_ERR; /* errno set for us. */
1769
1770 if (LCTF_INFO_ISROOT (fp, dst_tp->ctt_info) & CTF_ADD_ROOT)
1771 {
1772 /* The type that we found in the hash is also root-visible. If
1773 the two types match then use the existing one; otherwise,
1774 declare a conflict. Note: slices are not certain to match
1775 even if there is no conflict: we must check the contained type
1776 too. */
1777
1778 if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
1779 {
1780 if (kind != CTF_K_SLICE)
1781 {
1782 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1783 return dst_type;
1784 }
1785 }
1786 else
1787 {
1788 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
1789 }
1790 }
1791 else
1792 {
1793 /* We found a non-root-visible type in the hash. If its encoding
1794 is the same, we can reuse it, unless it is a slice. */
1795
1796 if (memcmp (&src_en, &dst_en, sizeof (ctf_encoding_t)) == 0)
1797 {
1798 if (kind != CTF_K_SLICE)
1799 {
1800 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1801 return dst_type;
1802 }
1803 }
1804 }
1805 }
1806 }
1807
1808 src.ctb_dict = src_fp;
1809 src.ctb_type = src_type;
1810 src.ctb_dtd = NULL;
1811
1812 dst.ctb_dict = dst_fp;
1813 dst.ctb_type = dst_type;
1814 dst.ctb_dtd = NULL;
1815
1816 /* Now perform kind-specific processing. If dst_type is CTF_ERR, then we add
1817 a new type with the same properties as src_type to dst_fp. If dst_type is
1818 not CTF_ERR, then we verify that dst_type has the same attributes as
1819 src_type. We recurse for embedded references. Before we start, we note
1820 that we are processing this type, to prevent infinite recursion: we do not
1821 re-process any type that appears in this list. The list is emptied
1822 wholesale at the end of processing everything in this recursive stack. */
1823
1824 if (ctf_dynhash_insert (proc_tracking_fp->ctf_add_processing,
1825 (void *) (uintptr_t) src_type, (void *) 1) < 0)
1826 return ctf_set_typed_errno (dst_fp, ENOMEM);
1827
1828 switch (kind)
1829 {
1830 case CTF_K_INTEGER:
1831 /* If we found a match we will have either returned it or declared a
1832 conflict. */
1833 dst_type = ctf_add_integer (dst_fp, flag, name, &src_en);
1834 break;
1835
1836 case CTF_K_FLOAT:
1837 /* If we found a match we will have either returned it or declared a
1838 conflict. */
1839 dst_type = ctf_add_float (dst_fp, flag, name, &src_en);
1840 break;
1841
1842 case CTF_K_SLICE:
1843 /* We have checked for conflicting encodings: now try to add the
1844 contained type. */
1845 src_type = ctf_type_reference (src_fp, src_type);
1846 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
1847 proc_tracking_fp);
1848
1849 if (src_type == CTF_ERR)
1850 return CTF_ERR; /* errno is set for us. */
1851
1852 dst_type = ctf_add_slice (dst_fp, flag, src_type, &src_en);
1853 break;
1854
1855 case CTF_K_POINTER:
1856 case CTF_K_VOLATILE:
1857 case CTF_K_CONST:
1858 case CTF_K_RESTRICT:
1859 src_type = ctf_type_reference (src_fp, src_type);
1860 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
1861 proc_tracking_fp);
1862
1863 if (src_type == CTF_ERR)
1864 return CTF_ERR; /* errno is set for us. */
1865
1866 dst_type = ctf_add_reftype (dst_fp, flag, src_type, kind);
1867 break;
1868
1869 case CTF_K_ARRAY:
1870 if (ctf_array_info (src_fp, src_type, &src_ar) != 0)
1871 return (ctf_set_typed_errno (dst_fp, ctf_errno (src_fp)));
1872
1873 src_ar.ctr_contents =
1874 ctf_add_type_internal (dst_fp, src_fp, src_ar.ctr_contents,
1875 proc_tracking_fp);
1876 src_ar.ctr_index = ctf_add_type_internal (dst_fp, src_fp,
1877 src_ar.ctr_index,
1878 proc_tracking_fp);
1879 src_ar.ctr_nelems = src_ar.ctr_nelems;
1880
1881 if (src_ar.ctr_contents == CTF_ERR || src_ar.ctr_index == CTF_ERR)
1882 return CTF_ERR; /* errno is set for us. */
1883
1884 if (dst_type != CTF_ERR)
1885 {
1886 if (ctf_array_info (dst_fp, dst_type, &dst_ar) != 0)
1887 return CTF_ERR; /* errno is set for us. */
1888
1889 if (memcmp (&src_ar, &dst_ar, sizeof (ctf_arinfo_t)))
1890 {
1891 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1892 _("conflict for type %s against ID %lx: array info "
1893 "differs, old %lx/%lx/%x; new: %lx/%lx/%x"),
1894 name, dst_type, src_ar.ctr_contents,
1895 src_ar.ctr_index, src_ar.ctr_nelems,
1896 dst_ar.ctr_contents, dst_ar.ctr_index,
1897 dst_ar.ctr_nelems);
1898 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
1899 }
1900 }
1901 else
1902 dst_type = ctf_add_array (dst_fp, flag, &src_ar);
1903 break;
1904
1905 case CTF_K_FUNCTION:
1906 ctc.ctc_return = ctf_add_type_internal (dst_fp, src_fp,
1907 src_tp->ctt_type,
1908 proc_tracking_fp);
1909 ctc.ctc_argc = 0;
1910 ctc.ctc_flags = 0;
1911
1912 if (ctc.ctc_return == CTF_ERR)
1913 return CTF_ERR; /* errno is set for us. */
1914
1915 dst_type = ctf_add_function (dst_fp, flag, &ctc, NULL);
1916 break;
1917
1918 case CTF_K_STRUCT:
1919 case CTF_K_UNION:
1920 {
1921 ctf_next_t *i = NULL;
1922 ssize_t offset;
1923 const char *membname;
1924 ctf_id_t src_membtype;
1925
1926 /* Technically to match a struct or union we need to check both
1927 ways (src members vs. dst, dst members vs. src) but we make
1928 this more optimal by only checking src vs. dst and comparing
1929 the total size of the structure (which we must do anyway)
1930 which covers the possibility of dst members not in src.
1931 This optimization can be defeated for unions, but is so
1932 pathological as to render it irrelevant for our purposes. */
1933
1934 if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
1935 && dst_kind != CTF_K_FORWARD)
1936 {
1937 if (ctf_type_size (src_fp, src_type) !=
1938 ctf_type_size (dst_fp, dst_type))
1939 {
1940 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1941 _("conflict for type %s against ID %lx: union "
1942 "size differs, old %li, new %li"), name,
1943 dst_type, (long) ctf_type_size (src_fp, src_type),
1944 (long) ctf_type_size (dst_fp, dst_type));
1945 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
1946 }
1947
1948 if (ctf_member_iter (src_fp, src_type, membcmp, &dst))
1949 {
1950 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
1951 _("conflict for type %s against ID %lx: members "
1952 "differ, see above"), name, dst_type);
1953 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
1954 }
1955
1956 break;
1957 }
1958
1959 dst_type = ctf_add_struct_sized (dst_fp, flag, name,
1960 ctf_type_size (src_fp, src_type));
1961 if (dst_type == CTF_ERR)
1962 return CTF_ERR; /* errno is set for us. */
1963
1964 /* Pre-emptively add this struct to the type mapping so that
1965 structures that refer to themselves work. */
1966 ctf_add_type_mapping (src_fp, src_type, dst_fp, dst_type);
1967
1968 while ((offset = ctf_member_next (src_fp, src_type, &i, &membname,
1969 &src_membtype, 0)) >= 0)
1970 {
1971 ctf_dict_t *dst = dst_fp;
1972 ctf_id_t dst_membtype = ctf_type_mapping (src_fp, src_membtype, &dst);
1973
1974 if (dst_membtype == 0)
1975 {
1976 dst_membtype = ctf_add_type_internal (dst_fp, src_fp,
1977 src_membtype,
1978 proc_tracking_fp);
1979 if (dst_membtype == CTF_ERR)
1980 {
1981 if (ctf_errno (dst_fp) != ECTF_NONREPRESENTABLE)
1982 {
1983 ctf_next_destroy (i);
1984 break;
1985 }
1986 }
1987 }
1988
1989 if (ctf_add_member_offset (dst_fp, dst_type, membname,
1990 dst_membtype, offset) < 0)
1991 {
1992 ctf_next_destroy (i);
1993 break;
1994 }
1995 }
1996 if (ctf_errno (src_fp) != ECTF_NEXT_END)
1997 return CTF_ERR; /* errno is set for us. */
1998 break;
1999 }
2000
2001 case CTF_K_ENUM:
2002 if (dst_type != CTF_ERR && kind != CTF_K_FORWARD
2003 && dst_kind != CTF_K_FORWARD)
2004 {
2005 if (ctf_enum_iter (src_fp, src_type, enumcmp, &dst)
2006 || ctf_enum_iter (dst_fp, dst_type, enumcmp, &src))
2007 {
2008 ctf_err_warn (dst_fp, 1, ECTF_CONFLICT,
2009 _("conflict for enum %s against ID %lx: members "
2010 "differ, see above"), name, dst_type);
2011 return (ctf_set_typed_errno (dst_fp, ECTF_CONFLICT));
2012 }
2013 }
2014 else
2015 {
2016 ctf_snapshot_id_t snap = ctf_snapshot (dst_fp);
2017
2018 dst_type = ctf_add_enum (dst_fp, flag, name);
2019 if ((dst.ctb_type = dst_type) == CTF_ERR
2020 || ctf_enum_iter (src_fp, src_type, enumadd, &dst))
2021 {
2022 ctf_rollback (dst_fp, snap);
2023 return CTF_ERR; /* errno is set for us */
2024 }
2025 }
2026 break;
2027
2028 case CTF_K_FORWARD:
2029 if (dst_type == CTF_ERR)
2030 dst_type = ctf_add_forward (dst_fp, flag, name, forward_kind);
2031 break;
2032
2033 case CTF_K_TYPEDEF:
2034 src_type = ctf_type_reference (src_fp, src_type);
2035 src_type = ctf_add_type_internal (dst_fp, src_fp, src_type,
2036 proc_tracking_fp);
2037
2038 if (src_type == CTF_ERR)
2039 return CTF_ERR; /* errno is set for us. */
2040
2041 /* If dst_type is not CTF_ERR at this point, we should check if
2042 ctf_type_reference(dst_fp, dst_type) != src_type and if so fail with
2043 ECTF_CONFLICT. However, this causes problems with bitness typedefs
2044 that vary based on things like if 32-bit then pid_t is int otherwise
2045 long. We therefore omit this check and assume that if the identically
2046 named typedef already exists in dst_fp, it is correct or
2047 equivalent. */
2048
2049 if (dst_type == CTF_ERR)
2050 dst_type = ctf_add_typedef (dst_fp, flag, name, src_type);
2051
2052 break;
2053
2054 default:
2055 return (ctf_set_typed_errno (dst_fp, ECTF_CORRUPT));
2056 }
2057
2058 if (dst_type != CTF_ERR)
2059 ctf_add_type_mapping (src_fp, orig_src_type, dst_fp, dst_type);
2060 return dst_type;
2061 }
2062
2063 ctf_id_t
2064 ctf_add_type (ctf_dict_t *dst_fp, ctf_dict_t *src_fp, ctf_id_t src_type)
2065 {
2066 ctf_id_t id;
2067
2068 if (!src_fp->ctf_add_processing)
2069 src_fp->ctf_add_processing = ctf_dynhash_create (ctf_hash_integer,
2070 ctf_hash_eq_integer,
2071 NULL, NULL);
2072
2073 /* We store the hash on the source, because it contains only source type IDs:
2074 but callers will invariably expect errors to appear on the dest. */
2075 if (!src_fp->ctf_add_processing)
2076 return (ctf_set_typed_errno (dst_fp, ENOMEM));
2077
2078 id = ctf_add_type_internal (dst_fp, src_fp, src_type, src_fp);
2079 ctf_dynhash_empty (src_fp->ctf_add_processing);
2080
2081 return id;
2082 }
2083