
Lines matching defs:sdp (only in uvm)

247 	struct swapdev *sdp;
248 const size_t bytesperword = sizeof(sdp->swd_encmap[0]);
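
The per-device encryption bitmap (swd_encmap) is sized in whole words of its element type, which is why a throwaway sdp pointer appears here purely to feed sizeof. A minimal userland sketch of that sizing arithmetic, with an illustrative struct standing in for struct swapdev and a helper name (encmap_bytes) that is not the kernel's:

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-in for struct swapdev's encryption bitmap member. */
struct dev {
	uint32_t *encmap;	/* one bit per swap page on this device */
};

/* Bytes needed for the bitmap, rounded up to whole map words. */
static size_t
encmap_bytes(size_t npages)
{
	struct dev *d;		/* only here to feed sizeof, as in the kernel */
	const size_t bytesperword = sizeof(d->encmap[0]);
	const size_t bitsperword = CHAR_BIT * bytesperword;
	const size_t nwords = (npages + bitsperword - 1) / bitsperword;

	return nwords * bytesperword;
}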
316 * swaplist_insert: insert swap device "sdp" into the global list
324 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
366 sdp->swd_priority = priority;
367 TAILQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
381 struct swapdev *sdp;
393 TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
394 if (sdp->swd_vp == vp) {
397 sdp, swd_next);
400 return(sdp);
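
swaplist_find scans the priority buckets in order and returns the first device whose backing vnode matches, unlinking it from its bucket when asked to. A reduced userland sketch of that scan; the structure and field names below are illustrative stand-ins, not the kernel's struct swappri and struct swapdev:

#include <stdbool.h>
#include <stddef.h>
#include <sys/queue.h>

struct swapdev_s {			/* stand-in for struct swapdev */
	void *vp;			/* backing vnode, used as the key */
	TAILQ_ENTRY(swapdev_s) next;
};

struct swappri_s {			/* stand-in for struct swappri */
	TAILQ_HEAD(, swapdev_s) devs;
	LIST_ENTRY(swappri_s) link;
};

static LIST_HEAD(, swappri_s) swap_priority =
    LIST_HEAD_INITIALIZER(swap_priority);

/* Return the device backed by "vp"; unlink it from its bucket if remove. */
static struct swapdev_s *
find_swapdev(void *vp, bool remove)
{
	struct swappri_s *spp;
	struct swapdev_s *sdp;

	LIST_FOREACH(spp, &swap_priority, link) {
		TAILQ_FOREACH(sdp, &spp->devs, next) {
			if (sdp->vp != vp)
				continue;
			if (remove)
				TAILQ_REMOVE(&spp->devs, sdp, next);
			return sdp;
		}
	}
	return NULL;
}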
439 struct swapdev *sdp;
445 TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
446 if (sdp->swd_flags & SWF_FAKE)
448 if (pgno >= sdp->swd_drumoffset &&
449 pgno < (sdp->swd_drumoffset + sdp->swd_drumsize)) {
450 return sdp;
458 * swapdrum_sdp_is: true iff the swap device for pgno is sdp
463 swapdrum_sdp_is(int pgno, struct swapdev *sdp)
468 result = swapdrum_getsdp(pgno) == sdp;
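
swapdrum_getsdp resolves a drum page number to the device whose [swd_drumoffset, swd_drumoffset + swd_drumsize) window contains it, skipping devices still flagged SWF_FAKE; swapdrum_sdp_is then only compares that lookup against an expected device. A sketch of the range test over a flat array (the array layout is an assumption for illustration; the kernel walks the priority buckets instead):

#include <stdbool.h>
#include <stddef.h>

struct drumdev {		/* illustrative stand-in for struct swapdev */
	int  drumoffset;	/* first drum page owned by this device */
	int  drumsize;		/* number of drum pages it owns */
	bool fake;		/* still being configured (SWF_FAKE) */
};

/* Return the device owning drum page "pgno", or NULL if none does. */
static struct drumdev *
drum_lookup(struct drumdev *devs, size_t ndevs, int pgno)
{
	for (size_t i = 0; i < ndevs; i++) {
		if (devs[i].fake)
			continue;
		if (pgno >= devs[i].drumoffset &&
		    pgno < devs[i].drumoffset + devs[i].drumsize)
			return &devs[i];
	}
	return NULL;
}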
485 swapent_cvt(struct swapent *se, const struct swapdev *sdp, int inuse)
487 se->se_dev = sdp->swd_dev;
488 se->se_flags = sdp->swd_flags;
489 se->se_nblks = sdp->swd_nblks;
491 se->se_priority = sdp->swd_priority;
492 KASSERT(sdp->swd_pathlen < sizeof(se->se_path));
493 strcpy(se->se_path, sdp->swd_path);
516 struct swapdev *sdp;
663 if ((sdp = swaplist_find(vp, true)) == NULL) {
666 swaplist_insert(sdp, spp, priority);
684 sdp = kmem_zalloc(sizeof(*sdp), KM_SLEEP);
686 sdp->swd_flags = SWF_FAKE;
687 sdp->swd_vp = vp;
688 sdp->swd_dev = (vp->v_type == VBLK) ? vp->v_rdev : NODEV;
689 bufq_alloc(&sdp->swd_tab, "disksort", BUFQ_SORT_RAWBLOCK);
694 bufq_free(sdp->swd_tab);
695 kmem_free(sdp, sizeof(*sdp));
699 swaplist_insert(sdp, spp, priority);
703 sdp->swd_pathlen = len;
704 sdp->swd_path = kmem_alloc(len, KM_SLEEP);
705 if (copystr(userpath, sdp->swd_path, len, 0) != 0)
715 if ((error = swap_on(l, sdp)) != 0) {
720 bufq_free(sdp->swd_tab);
721 kmem_free(sdp->swd_path, sdp->swd_pathlen);
722 kmem_free(sdp, sizeof(*sdp));
729 if ((sdp = swaplist_find(vp, false)) == NULL) {
739 if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0) {
748 error = swap_off(l, sdp);
782 struct swapdev *sdp;
804 TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
810 inuse = btodb((uint64_t)sdp->swd_npginuse <<
814 swapent_cvt(&sep, sdp, inuse);
838 swap_on(struct lwp *l, struct swapdev *sdp)
849 * we want to enable swapping on sdp. the swd_vp contains
854 vp = sdp->swd_vp;
855 dev = sdp->swd_dev;
893 sdp->swd_bsize = 1 << vp->v_mount->mnt_fs_bshift;
899 sdp->swd_maxactive = 2; /* XXX */
901 sdp->swd_maxactive = 8; /* XXX */
913 sdp->swd_nblks = nblocks;
950 sdp->swd_blist = blist_create(npages);
952 blist_free(sdp->swd_blist, addr, size);
958 sdp->swd_encmap = kmem_zalloc(encmap_size(npages), KM_SLEEP);
959 sdp->swd_encinit = false;
993 if (rootpages != blist_fill(sdp->swd_blist, addr, rootpages)) {
1026 sdp->swd_drumoffset = (int)result;
1027 sdp->swd_drumsize = npages;
1028 sdp->swd_npages = size;
1030 sdp->swd_flags &= ~SWF_FAKE; /* going live */
1031 sdp->swd_flags |= (SWF_INUSE|SWF_ENABLE);
1042 if (sdp->swd_blist) {
1043 blist_destroy(sdp->swd_blist);
1057 swap_off(struct lwp *l, struct swapdev *sdp)
1059 int npages = sdp->swd_npages;
1063 UVMHIST_CALLARGS(pdhist, " dev=%#jx, npages=%jd", sdp->swd_dev,npages, 0, 0);
1069 sdp->swd_flags &= ~SWF_ENABLE;
1080 if (uao_swap_off(sdp->swd_drumoffset,
1081 sdp->swd_drumoffset + sdp->swd_drumsize) ||
1082 amap_swap_off(sdp->swd_drumoffset,
1083 sdp->swd_drumoffset + sdp->swd_drumsize)) {
1085 } else if (sdp->swd_npginuse > sdp->swd_npgbad) {
1091 sdp->swd_flags |= SWF_ENABLE;
1102 if (sdp->swd_vp->v_type != VBLK) {
1116 vrele(sdp->swd_vp);
1117 if (sdp->swd_vp != rootvp) {
1118 (void) VOP_CLOSE(sdp->swd_vp, FREAD|FWRITE, l->l_cred);
1123 uvmexp.swpginuse -= sdp->swd_npgbad;
1125 if (swaplist_find(sdp->swd_vp, true) == NULL)
1133 vmem_free(swapmap, sdp->swd_drumoffset, sdp->swd_drumsize);
1134 blist_destroy(sdp->swd_blist);
1135 bufq_free(sdp->swd_tab);
1136 kmem_free(__UNVOLATILE(sdp->swd_encmap),
1137 encmap_size(sdp->swd_drumsize));
1138 explicit_memset(&sdp->swd_enckey, 0, sizeof sdp->swd_enckey);
1139 explicit_memset(&sdp->swd_deckey, 0, sizeof sdp->swd_deckey);
1140 kmem_free(sdp, sizeof(*sdp));
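
Before the swapdev is freed, both AES key schedules are cleared with explicit_memset so the compiler cannot optimize the wipe away. A portable sketch of such a non-elidable clear, using the common volatile-pointer idiom purely as a stand-in for explicit_memset:

#include <stddef.h>

/* Clear "len" bytes in a way the optimizer may not elide. */
static void
wipe(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	while (len-- > 0)
		*p++ = 0;
}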
1147 struct swapdev *sdp;
1159 TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1160 if (sdp->swd_flags & SWF_FAKE)
1162 if ((sdp->swd_flags & (SWF_INUSE|SWF_ENABLE)) == 0)
1165 printf("\nturning off swap on %s...", sdp->swd_path);
1168 vn_lock(vp = sdp->swd_vp, LK_EXCLUSIVE|LK_RETRY);
1170 error = swap_off(l, sdp);
1175 "with error %d\n", sdp->swd_path, error);
1176 TAILQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1216 struct swapdev *sdp;
1228 sdp = swapdrum_getsdp(pageno);
1230 if (sdp == NULL) {
1242 pageno -= sdp->swd_drumoffset; /* page # on swapdev */
1247 sdp->swd_drumoffset, bn, bp->b_bcount);
1255 vp = sdp->swd_vp; /* swapdev vnode pointer */
1264 * on the swapdev (sdp).
1267 bp->b_dev = sdp->swd_dev; /* swapdev dev_t */
1294 sw_reg_strategy(sdp, bp, bn);
1356 sw_reg_strategy(struct swapdev *sdp, struct buf *bp, int bn)
1375 vnx->vx_sdp = sdp;
1397 error = VOP_BMAP(sdp->swd_vp, byteoff / sdp->swd_bsize,
1431 off = byteoff % sdp->swd_bsize;
1432 sz = (1 + nra) * sdp->swd_bsize - off;
1438 (uintptr_t)sdp->swd_vp, (uintptr_t)vp, byteoff, nbn);
1479 bufq_put(sdp->swd_tab, &nbp->vb_buf);
1480 sw_reg_start(sdp);
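
For file-backed swap, sw_reg_strategy splits each transfer into chunks that VOP_BMAP can map contiguously: the byte offset is translated to a filesystem block plus nra following blocks, and the chunk carries at most (1 + nra) * swd_bsize bytes minus the offset into the first block, clamped to what remains of the request. A sketch of just that arithmetic (the helper name and parameters are illustrative):

#include <stddef.h>
#include <stdint.h>

/*
 * byteoff: byte offset of this chunk within the swap file
 * bsize:   filesystem block size (swd_bsize)
 * nra:     contiguous blocks VOP_BMAP reported after the first one
 * resid:   bytes still left in the whole request
 */
static size_t
chunk_bytes(uint64_t byteoff, size_t bsize, int nra, size_t resid)
{
	size_t off = (size_t)(byteoff % bsize);		/* offset into first block */
	size_t sz = (size_t)(1 + nra) * bsize - off;	/* contiguous bytes mappable */

	return sz < resid ? sz : resid;
}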
1512 sw_reg_start(struct swapdev *sdp)
1519 if ((sdp->swd_flags & SWF_BUSY) != 0)
1522 sdp->swd_flags |= SWF_BUSY;
1524 while (sdp->swd_active < sdp->swd_maxactive) {
1525 bp = bufq_get(sdp->swd_tab);
1528 sdp->swd_active++;
1543 sdp->swd_flags &= ~SWF_BUSY;
1566 struct swapdev *sdp = vnx->vx_sdp;
1624 sdp->swd_active--;
1625 sw_reg_start(sdp);
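
sw_reg_start drains the per-device bufq but keeps at most swd_maxactive requests in flight, with SWF_BUSY guarding against reentrant draining; the completion path decrements swd_active and calls it again. A simplified, single-threaded sketch of that throttle, where the queue, the stub issue() and all names stand in for the bufq and VOP_STRATEGY:

#include <stdbool.h>
#include <stddef.h>

/* Illustrative request node; the kernel queues struct buf on a bufq. */
struct req {
	struct req *next;
};

static struct req *pending;	/* queued, not yet issued */
static int  active;		/* requests currently in flight */
static int  maxactive = 8;	/* cap, like swd_maxactive */
static bool busy;		/* reentry guard, like SWF_BUSY */

/* Stub: the kernel hands the request to the file system here. */
static void
issue(struct req *r)
{
	(void)r;
}

/* Drain the queue, never exceeding maxactive requests in flight. */
static void
pump(void)
{
	if (busy)
		return;		/* someone else is already draining */
	busy = true;
	while (active < maxactive && pending != NULL) {
		struct req *r = pending;

		pending = r->next;
		active++;
		issue(r);
	}
	busy = false;
}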
1643 struct swapdev *sdp;
1673 TAILQ_FOREACH(sdp, &spp->spi_swapdev, swd_next) {
1677 if ((sdp->swd_flags & SWF_ENABLE) == 0)
1679 if (sdp->swd_npginuse + *nslots > sdp->swd_npages)
1681 result = blist_alloc(sdp->swd_blist, *nslots);
1685 KASSERT(result < sdp->swd_drumsize);
1690 TAILQ_REMOVE(&spp->spi_swapdev, sdp, swd_next);
1691 TAILQ_INSERT_TAIL(&spp->spi_swapdev, sdp, swd_next);
1692 sdp->swd_npginuse += *nslots;
1698 *nslots, result + sdp->swd_drumoffset, 0, 0);
1699 return (result + sdp->swd_drumoffset);
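
uvm_swap_alloc hands out slots in the device-relative space of the per-device blist and converts them to global drum slots by adding swd_drumoffset; the free path later subtracts the same bias before returning slots to the blist. A sketch of that two-way translation (the struct and helper names are illustrative):

struct dev {
	int drumoffset;		/* first global drum slot owned by this device */
};

/* Global slot handed back to callers: device-relative slot plus the bias. */
static int
to_global(const struct dev *d, int local_slot)
{
	return local_slot + d->drumoffset;
}

/* Device-relative slot expected by the per-device allocator on free. */
static int
to_local(const struct dev *d, int global_slot)
{
	return global_slot - d->drumoffset;
}

Keeping allocations device-relative lets each device manage its own blist while callers only ever see global drum slots.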
1749 struct swapdev *sdp;
1753 sdp = swapdrum_getsdp(startslot);
1754 KASSERT(sdp != NULL);
1765 sdp->swd_npgbad += nslots;
1766 UVMHIST_LOG(pdhist, "now %jd bad", sdp->swd_npgbad, 0,0,0);
1779 struct swapdev *sdp;
1793 * convert drum slot offset back to sdp, free the blocks
1799 sdp = swapdrum_getsdp(startslot);
1801 KASSERT(sdp != NULL);
1802 KASSERT(sdp->swd_npginuse >= nslots);
1803 blist_free(sdp->swd_blist, startslot - sdp->swd_drumoffset, nslots);
1804 sdp->swd_npginuse -= nslots;
1910 struct swapdev *sdp;
1932 sdp = swapdrum_getsdp(startslot);
1933 if (!sdp->swd_encinit) {
1938 uvm_swap_genkey(sdp);
1940 KASSERT(sdp->swd_encinit);
1945 KDASSERT(swapdrum_sdp_is(s, sdp));
1946 KASSERT(s >= sdp->swd_drumoffset);
1947 s -= sdp->swd_drumoffset;
1948 KASSERT(s < sdp->swd_drumsize);
1951 uvm_swap_encryptpage(sdp,
1953 atomic_or_32(&sdp->swd_encmap[s/32],
1956 atomic_and_32(&sdp->swd_encmap[s/32],
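
The encryption state of each device-relative slot is one bit in swd_encmap: word s/32, bit s%32, updated with atomic_or_32/atomic_and_32. A plain, non-atomic sketch of the same indexing:

#include <stdint.h>

/* Word s/32, bit s%32; the kernel uses atomic_or_32()/atomic_and_32(). */
static inline void
encmap_set(uint32_t *map, int s)
{
	map[s / 32] |= 1u << (s % 32);
}

static inline void
encmap_clear(uint32_t *map, int s)
{
	map[s / 32] &= ~(1u << (s % 32));
}

static inline int
encmap_test(const uint32_t *map, int s)
{
	return (map[s / 32] >> (s % 32)) & 1u;
}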
2031 struct swapdev *sdp;
2036 * Get the sdp. Everything about it except the encinit
2043 sdp = swapdrum_getsdp(startslot);
2044 encinit = sdp->swd_encinit;
2056 KDASSERT(swapdrum_sdp_is(s, sdp));
2057 KASSERT(s >= sdp->swd_drumoffset);
2058 s -= sdp->swd_drumoffset;
2059 KASSERT(s < sdp->swd_drumsize);
2060 if ((atomic_load_relaxed(&sdp->swd_encmap[s/32]) &
2063 uvm_swap_decryptpage(sdp,
2090 * uvm_swap_genkey(sdp)
2095 uvm_swap_genkey(struct swapdev *sdp)
2099 KASSERT(!sdp->swd_encinit);
2102 aes_setenckey256(&sdp->swd_enckey, key);
2103 aes_setdeckey256(&sdp->swd_deckey, key);
2106 sdp->swd_encinit = true;
2110 * uvm_swap_encryptpage(sdp, kva, slot)
2116 uvm_swap_encryptpage(struct swapdev *sdp, void *kva, int slot)
2122 aes_enc(&sdp->swd_enckey, (const void *)preiv, iv, AES_256_NROUNDS);
2125 aes_cbc_enc(&sdp->swd_enckey, kva, kva, PAGE_SIZE, iv,
2132 * uvm_swap_decryptpage(sdp, kva, slot)
2138 uvm_swap_decryptpage(struct swapdev *sdp, void *kva, int slot)
2144 aes_enc(&sdp->swd_enckey, (const void *)preiv, iv, AES_256_NROUNDS);
2147 aes_cbc_dec(&sdp->swd_deckey, kva, kva, PAGE_SIZE, iv,
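
uvm_swap_encryptpage and uvm_swap_decryptpage derive a deterministic per-slot IV by encrypting a block built from the slot number under the device's encryption key, then run the page through AES-256-CBC with that IV; the decrypt path derives the same IV before aes_cbc_dec. A structural sketch of the IV derivation only, with a stub block_encrypt() standing in for aes_enc() (not a usable cipher, and the byte encoding of the slot is a simplification):

#include <stdint.h>
#include <string.h>

/*
 * Stub standing in for aes_enc(): a real implementation encrypts one
 * 16-byte block under a 256-bit key.  The body is a placeholder so the
 * sketch compiles; it is not a cipher.
 */
static void
block_encrypt(const uint8_t key[32], const uint8_t in[16], uint8_t out[16])
{
	for (int i = 0; i < 16; i++)
		out[i] = in[i] ^ key[i];
}

/* Derive the per-page CBC IV: encrypt the slot padded to a full block. */
static void
slot_iv(const uint8_t key[32], uint32_t slot, uint8_t iv[16])
{
	uint8_t preiv[16] = { 0 };

	memcpy(preiv, &slot, sizeof slot);
	block_encrypt(key, preiv, iv);
}

Deriving the IV from the slot keeps the scheme stateless: nothing per-page has to be stored alongside the swapped data.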