/*	$NetBSD: svc_dg.c,v 1.2 2000/06/04 04:35:13 thorpej Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part.  Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California  94043
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident	"@(#)svc_dg.c	1.17	94/04/24 SMI" */


/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>

#include "rpc_com.h"
#include "svc_dg.h"

#define	su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define	rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#ifndef MAX
#define	MAX(a, b)	(((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops __P((SVCXPRT *));
static enum xprt_stat svc_dg_stat __P((SVCXPRT *));
static bool_t svc_dg_recv __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_reply __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_getargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static bool_t svc_dg_freeargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static void svc_dg_destroy __P((SVCXPRT *));
static bool_t svc_dg_control __P((SVCXPRT *, const u_int, void *));
static int cache_get __P((SVCXPRT *, struct rpc_msg *, char **, size_t *));
static void cache_set __P((SVCXPRT *, size_t));
int svc_dg_enablecache __P((SVCXPRT *, u_int));
/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered
 * (see svc.h, xprt_register).  If recvsize or sendsize is 0, suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
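/*
 * A minimal usage sketch (EXAMPLE_PROG, EXAMPLE_VERS and example_dispatch
 * are hypothetical names; error handling omitted):
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	SVCXPRT *xprt = svc_dg_create(fd, 0, 0);
 *	if (xprt != NULL &&
 *	    svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS, example_dispatch, NULL))
 *		svc_run();
 */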
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(fd, sendsize, recvsize)
	int fd;
	u_int sendsize;
	u_int recvsize;
{
	SVCXPRT *xprt;
	struct svc_dg_data *su = NULL;
	struct __rpc_sockinfo si;
	struct sockaddr_storage ss;
	socklen_t slen;

	if (!__rpc_fd2sockinfo(fd, &si)) {
		warnx(svc_dg_str, svc_dg_err1);
		return ((SVCXPRT *)NULL);
	}
	/*
	 * Find the receive and the send size
	 */
	sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
	recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
	if ((sendsize == 0) || (recvsize == 0)) {
		warnx(svc_dg_str, svc_dg_err2);
		return ((SVCXPRT *)NULL);
	}

	xprt = (SVCXPRT *)mem_alloc(sizeof (SVCXPRT));
	if (xprt == NULL)
		goto freedata;
	memset((char *)xprt, 0, sizeof (SVCXPRT));

	su = (struct svc_dg_data *)mem_alloc(sizeof (*su));
	if (su == NULL)
		goto freedata;
	su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
	if ((rpc_buffer(xprt) = (char *)mem_alloc(su->su_iosz)) == NULL)
		goto freedata;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
		XDR_DECODE);
	su->su_cache = NULL;
	xprt->xp_fd = fd;
	xprt->xp_p2 = (caddr_t)su;
	xprt->xp_verf.oa_base = su->su_verfbody;
	svc_dg_ops(xprt);
	xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

	slen = sizeof ss;
	if (getsockname(fd, (struct sockaddr *)&ss, &slen) < 0)
		goto freedata;
	xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
	xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
	xprt->xp_ltaddr.len = slen;
	memcpy(xprt->xp_ltaddr.buf, &ss, slen);

	xprt_register(xprt);
	return (xprt);
freedata:
	(void) warnx(svc_dg_str, __no_mem_str);
	if (xprt) {
		if (su)
			(void) mem_free((char *) su, sizeof (*su));
		(void) mem_free((char *)xprt, sizeof (SVCXPRT));
	}
	return ((SVCXPRT *)NULL);
}

static enum xprt_stat
svc_dg_stat(xprt)
	SVCXPRT *xprt;
{
	return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(xprt, msg)
	register SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	char *reply;
	struct sockaddr_storage ss;
	socklen_t alen;
	size_t replylen;
	int rlen;

again:
	alen = sizeof (struct sockaddr_storage);
	rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
	    (struct sockaddr *)&ss, &alen);
	if (rlen == -1 && errno == EINTR)
		goto again;
	if (rlen == -1 || (rlen < 4 * sizeof (u_int32_t)))
		return (FALSE);
	/*
	 * Allocate the remote address buffer once, sized to match
	 * xp_rtaddr.maxlen, rather than leaking a fresh buffer per request.
	 */
	if (xprt->xp_rtaddr.buf == NULL &&
	    (xprt->xp_rtaddr.buf =
	    mem_alloc(sizeof (struct sockaddr_storage))) == NULL)
		return (FALSE);
	memcpy(xprt->xp_rtaddr.buf, &ss, alen);
	xprt->xp_rtaddr.len = alen;
#ifdef PORTMAP
	if (ss.ss_family == AF_INET) {
		xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
		xprt->xp_addrlen = sizeof (struct sockaddr_in);
	}
#endif
	xdrs->x_op = XDR_DECODE;
	XDR_SETPOS(xdrs, 0);
	if (! xdr_callmsg(xdrs, msg)) {
		return (FALSE);
	}
	su->su_xid = msg->rm_xid;
	if (su->su_cache != NULL) {
		if (cache_get(xprt, msg, &reply, &replylen)) {
			(void)sendto(xprt->xp_fd, reply, replylen, 0,
			    (struct sockaddr *)&ss, alen);
			return (FALSE);
		}
	}
	return (TRUE);
}

static bool_t
svc_dg_reply(xprt, msg)
	register SVCXPRT *xprt;
	struct rpc_msg *msg;
{
	struct svc_dg_data *su = su_data(xprt);
	XDR *xdrs = &(su->su_xdrs);
	bool_t stat = FALSE;
	size_t slen;

	xdrs->x_op = XDR_ENCODE;
	XDR_SETPOS(xdrs, 0);
	msg->rm_xid = su->su_xid;
	if (xdr_replymsg(xdrs, msg)) {
		slen = XDR_GETPOS(xdrs);
		if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
		    (struct sockaddr *)xprt->xp_rtaddr.buf,
		    (socklen_t)xprt->xp_rtaddr.len) == slen) {
			stat = TRUE;
			if (su->su_cache && slen >= 0)
				cache_set(xprt, slen);
		}
	}
	return (stat);
}

static bool_t
svc_dg_getargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
	return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(xprt, xdr_args, args_ptr)
	SVCXPRT *xprt;
	xdrproc_t xdr_args;
	caddr_t args_ptr;
{
	register XDR *xdrs = &(su_data(xprt)->su_xdrs);

	xdrs->x_op = XDR_FREE;
	return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(xprt)
	register SVCXPRT *xprt;
{
	register struct svc_dg_data *su = su_data(xprt);

	xprt_unregister(xprt);
	if (xprt->xp_fd != -1)
		(void)close(xprt->xp_fd);
	XDR_DESTROY(&(su->su_xdrs));
	(void) mem_free(rpc_buffer(xprt), su->su_iosz);
	(void) mem_free((caddr_t)su, sizeof (*su));
	if (xprt->xp_rtaddr.buf)
		(void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
	if (xprt->xp_ltaddr.buf)
		(void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
	if (xprt->xp_tp)
		(void) free(xprt->xp_tp);
	(void) mem_free((caddr_t)xprt, sizeof (SVCXPRT));
}

static bool_t
svc_dg_control(xprt, rq, in)
	SVCXPRT *xprt;
	const u_int rq;
	void *in;
{
	return (FALSE);
}

static void
svc_dg_ops(xprt)
	SVCXPRT *xprt;
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;
#ifdef __REENT
	extern mutex_t ops_lock;
#endif

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * FIFO cache for the connectionless (cl) server.
 * Copies pointers to reply buffers into the FIFO cache.
 * Buffers are sent again if retransmissions are detected.
 */
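/*
 * In outline (a sketch of the flow implemented below, not additional API):
 * svc_dg_recv() looks each request up with cache_get(); on a hit the saved
 * reply is retransmitted with sendto() and FALSE is returned, so the
 * dispatcher never sees the duplicate.  On a miss the request is dispatched
 * normally and svc_dg_reply() later files the reply with cache_set() for
 * possible retransmission.
 */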

#define	SPARSENESS 4	/* 75% sparse */

#define	ALLOC(type, size)	\
	(type *) mem_alloc((unsigned) (sizeof (type) * (size)))

#define	MEMZERO(addr, type, size)	\
	(void) memset((char *) (addr), 0, sizeof (type) * (int) (size))

#define	FREE(addr, type, size)	\
	mem_free((char *) (addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;
	rpcproc_t cache_proc;
	rpcvers_t cache_vers;
	rpcprog_t cache_prog;
	struct netbuf cache_addr;
	/*
	 * The cached reply and length
	 */
	char *cache_reply;
	size_t cache_replylen;
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define	CACHE_LOC(transp, xid)	\
	(xid % (SPARSENESS * ((struct cl_cache *) \
		su_data(transp)->su_cache)->uc_size))
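
/*
 * For example (illustrative numbers only): with a cache created by
 * svc_dg_enablecache(xprt, 64), the hash table has 64 * SPARSENESS = 256
 * buckets, so xid 0x12345 (74565) lands in bucket 74565 % 256 == 69.
 */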

#ifdef __REENT
extern mutex_t dupreq_lock;
#endif

/*
 * Enable use of the cache.  Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
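/*
 * A minimal usage sketch (the cache size of 64 entries is an illustrative
 * value; the service keeps working, without caching, if this fails):
 *
 *	if (!svc_dg_enablecache(xprt, 64))
 *		warnx("duplicate-request cache not enabled");
 */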
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(transp, size)
	SVCXPRT *transp;
	u_int size;
{
	struct svc_dg_data *su = su_data(transp);
	struct cl_cache *uc;

	mutex_lock(&dupreq_lock);
	if (su->su_cache != NULL) {
		(void) warnx(cache_enable_str, enable_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc = ALLOC(struct cl_cache, 1);
	if (uc == NULL) {
		warnx(cache_enable_str, alloc_err, " ");
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	uc->uc_size = size;
	uc->uc_nextvictim = 0;
	uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
	if (uc->uc_entries == NULL) {
		warnx(cache_enable_str, alloc_err, "data");
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
	uc->uc_fifo = ALLOC(cache_ptr, size);
	if (uc->uc_fifo == NULL) {
		warnx(cache_enable_str, alloc_err, "fifo");
		FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
		FREE(uc, struct cl_cache, 1);
		mutex_unlock(&dupreq_lock);
		return (0);
	}
	MEMZERO(uc->uc_fifo, cache_ptr, size);
	su->su_cache = (char *) uc;
	mutex_unlock(&dupreq_lock);
	return (1);
}

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla because the right RPC parameters
 * are not available at svc_dg_reply time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(xprt, replylen)
	SVCXPRT *xprt;
	size_t replylen;
{
	register cache_ptr victim;
	register cache_ptr *vicp;
	register struct svc_dg_data *su = su_data(xprt);
	struct cl_cache *uc = (struct cl_cache *) su->su_cache;
	u_int loc;
	char *newbuf;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	/*
	 * Find space for the new entry, either by
	 * reusing an old entry, or by mallocing a new one
	 */
	victim = uc->uc_fifo[uc->uc_nextvictim];
	if (victim != NULL) {
		loc = CACHE_LOC(xprt, victim->cache_xid);
		for (vicp = &uc->uc_entries[loc];
			*vicp != NULL && *vicp != victim;
			vicp = &(*vicp)->cache_next)
			;
		if (*vicp == NULL) {
			warnx(cache_set_str, cache_set_err1);
			mutex_unlock(&dupreq_lock);
			return;
		}
		*vicp = victim->cache_next;	/* remove from cache */
		newbuf = victim->cache_reply;
	} else {
		victim = ALLOC(struct cache_node, 1);
		if (victim == NULL) {
			warnx(cache_set_str, cache_set_err2);
			mutex_unlock(&dupreq_lock);
			return;
		}
		newbuf = (char *)mem_alloc(su->su_iosz);
		if (newbuf == NULL) {
			warnx(cache_set_str, cache_set_err3);
			FREE(victim, struct cache_node, 1);
			mutex_unlock(&dupreq_lock);
			return;
		}
	}

	/*
	 * Store it away
	 */
#ifdef RPC_CACHE_DEBUG
	if ((nconf = getnetconfigent(xprt->xp_netid)) != NULL) {
		uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
		freenetconfigent(nconf);
		printf(
	"cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
			su->su_xid, uc->uc_prog, uc->uc_vers,
			uc->uc_proc, uaddr);
		free(uaddr);
	}
#endif
	victim->cache_replylen = replylen;
	victim->cache_reply = rpc_buffer(xprt);
	rpc_buffer(xprt) = newbuf;
	xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
			su->su_iosz, XDR_ENCODE);
	victim->cache_xid = su->su_xid;
	victim->cache_proc = uc->uc_proc;
	victim->cache_vers = uc->uc_vers;
	victim->cache_prog = uc->uc_prog;
	victim->cache_addr = xprt->xp_rtaddr;
	victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
	(void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
	    (int)xprt->xp_rtaddr.len);
	loc = CACHE_LOC(xprt, victim->cache_xid);
	victim->cache_next = uc->uc_entries[loc];
	uc->uc_entries[loc] = victim;
	uc->uc_fifo[uc->uc_nextvictim++] = victim;
	uc->uc_nextvictim %= uc->uc_size;
	mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache.
 * Return 1 if found; return 0 if not found and set the stage for cache_set().
 */
static int
cache_get(xprt, msg, replyp, replylenp)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
	char **replyp;
	size_t *replylenp;
{
	u_int loc;
	register cache_ptr ent;
	register struct svc_dg_data *su = su_data(xprt);
	register struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
		    ent->cache_proc == msg->rm_call.cb_proc &&
		    ent->cache_vers == msg->rm_call.cb_vers &&
		    ent->cache_prog == msg->rm_call.cb_prog &&
		    ent->cache_addr.len == xprt->xp_rtaddr.len &&
		    (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
			xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if ((nconf = getnetconfigent(xprt->xp_netid)) != NULL) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}