svc_dg.c revision 1.1 1 /* $NetBSD: svc_dg.c,v 1.1 2000/06/02 23:11:16 fvdl Exp $ */
2
3 /*
4 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
5 * unrestricted use provided that this legend is included on all tape
6 * media and as a part of the software program in whole or part. Users
7 * may copy or modify Sun RPC without charge, but are not authorized
8 * to license or distribute it to anyone else except as part of a product or
9 * program developed by the user.
10 *
11 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
12 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
13 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
14 *
15 * Sun RPC is provided with no support and without any obligation on the
16 * part of Sun Microsystems, Inc. to assist in its use, correction,
17 * modification or enhancement.
18 *
19 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
20 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
21 * OR ANY PART THEREOF.
22 *
23 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
24 * or profits or other special, indirect and consequential damages, even if
25 * Sun has been advised of the possibility of such damages.
26 *
27 * Sun Microsystems, Inc.
28 * 2550 Garcia Avenue
29 * Mountain View, California 94043
30 */
31
32 /*
33 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
34 */
35
36 /* #ident "@(#)svc_dg.c 1.17 94/04/24 SMI" */
37
38
39 /*
40 * svc_dg.c, Server side for connectionless RPC.
41 *
42 * Does some caching in the hopes of achieving execute-at-most-once semantics.
43 */
44
45 #include "namespace.h"
46 #include "reentrant.h"
47 #include <sys/types.h>
48 #include <sys/socket.h>
49 #include <rpc/rpc.h>
50 #include <errno.h>
51 #include <unistd.h>
52 #include <stdio.h>
53 #include <stdlib.h>
54 #ifdef RPC_CACHE_DEBUG
55 #include <netconfig.h>
56 #include <netdir.h>
57 #endif
58 #include <err.h>
59
60 #include "rpc_com.h"
61 #include "svc_dg.h"
62
/* Per-transport private data (struct svc_dg_data) is hung off xp_p2... */
#define su_data(xprt) ((struct svc_dg_data *)(xprt->xp_p2))
/* ...and the shared send/receive I/O buffer off xp_p1. */
#define rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#ifndef MAX
/* NB: classic double-evaluation macro; only used on plain variables here. */
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

/* The xp_ops/xp_ops2 method table for connectionless transports. */
static void svc_dg_ops __P((SVCXPRT *));
static enum xprt_stat svc_dg_stat __P((SVCXPRT *));
static bool_t svc_dg_recv __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_reply __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_getargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static bool_t svc_dg_freeargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static void svc_dg_destroy __P((SVCXPRT *));
static bool_t svc_dg_control __P((SVCXPRT *, const u_int, void *));
/* Duplicate-request cache lookup/insert (see the caching component below). */
static int cache_get __P((SVCXPRT *, struct rpc_msg *, char **, size_t *));
static void cache_set __P((SVCXPRT *, size_t));
int svc_dg_enablecache __P((SVCXPRT *, u_int));

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless specific initializations.
 * Once *xprt is initialized, it is registered.
 * see (svc.h, xprt_register).  If recvsize or sendsize are 0 suitable
 * system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";
99
100 SVCXPRT *
101 svc_dg_create(fd, sendsize, recvsize)
102 int fd;
103 u_int sendsize;
104 u_int recvsize;
105 {
106 SVCXPRT *xprt;
107 struct svc_dg_data *su = NULL;
108 struct __rpc_sockinfo si;
109 struct sockaddr_storage ss;
110 socklen_t slen;
111
112 if (!__rpc_fd2sockinfo(fd, &si)) {
113 warnx(svc_dg_str, svc_dg_err1);
114 return ((SVCXPRT *)NULL);
115 }
116 /*
117 * Find the receive and the send size
118 */
119 sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
120 recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
121 if ((sendsize == 0) || (recvsize == 0)) {
122 warnx(svc_dg_str, svc_dg_err2);
123 return ((SVCXPRT *)NULL);
124 }
125
126 xprt = (SVCXPRT *)mem_alloc(sizeof (SVCXPRT));
127 if (xprt == NULL)
128 goto freedata;
129 memset((char *)xprt, 0, sizeof (SVCXPRT));
130
131 su = (struct svc_dg_data *)mem_alloc(sizeof (*su));
132 if (su == NULL)
133 goto freedata;
134 su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
135 if ((rpc_buffer(xprt) = (char *)mem_alloc(su->su_iosz)) == NULL)
136 goto freedata;
137 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
138 XDR_DECODE);
139 su->su_cache = NULL;
140 xprt->xp_fd = fd;
141 xprt->xp_p2 = (caddr_t)su;
142 xprt->xp_verf.oa_base = su->su_verfbody;
143 svc_dg_ops(xprt);
144 xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);
145
146 slen = sizeof ss;
147 if (getsockname(fd, (struct sockaddr *)&ss, &slen) < 0)
148 goto freedata;
149 xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
150 xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
151 xprt->xp_ltaddr.len = slen;
152 memcpy(xprt->xp_ltaddr.buf, &ss, slen);
153
154 xprt_register(xprt);
155 return (xprt);
156 freedata:
157 (void) warnx(svc_dg_str, __no_mem_str);
158 if (xprt) {
159 if (su)
160 (void) mem_free((char *) su, sizeof (*su));
161 (void) mem_free((char *)xprt, sizeof (SVCXPRT));
162 }
163 return ((SVCXPRT *)NULL);
164 }
165
166 static enum xprt_stat
167 svc_dg_stat(xprt)
168 SVCXPRT *xprt;
169 {
170 return (XPRT_IDLE);
171 }
172
173 static bool_t
174 svc_dg_recv(xprt, msg)
175 register SVCXPRT *xprt;
176 struct rpc_msg *msg;
177 {
178 struct svc_dg_data *su = su_data(xprt);
179 XDR *xdrs = &(su->su_xdrs);
180 char *reply;
181 struct sockaddr_storage ss;
182 socklen_t alen;
183 size_t replylen;
184 int rlen;
185
186 again:
187 alen = sizeof (struct sockaddr_storage);
188 rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
189 (struct sockaddr *)&ss, &alen);
190 if (rlen == -1 && errno == EINTR)
191 goto again;
192 if (rlen == -1 || (rlen < 4 * sizeof (u_int32_t)))
193 return (FALSE);
194 xprt->xp_rtaddr.buf = mem_alloc(alen);
195 memcpy(xprt->xp_rtaddr.buf, &ss, alen);
196 xprt->xp_rtaddr.len = alen;
197 #ifdef PORTMAP
198 if (ss.ss_family == AF_INET) {
199 xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
200 xprt->xp_addrlen = sizeof (struct sockaddr_in);
201 }
202 #endif
203 xdrs->x_op = XDR_DECODE;
204 XDR_SETPOS(xdrs, 0);
205 if (! xdr_callmsg(xdrs, msg)) {
206 return (FALSE);
207 }
208 su->su_xid = msg->rm_xid;
209 if (su->su_cache != NULL) {
210 if (cache_get(xprt, msg, &reply, &replylen)) {
211 (void)sendto(xprt->xp_fd, reply, replylen, 0,
212 (struct sockaddr *)&ss, alen);
213 return (FALSE);
214 }
215 }
216 return (TRUE);
217 }
218
219 static bool_t
220 svc_dg_reply(xprt, msg)
221 register SVCXPRT *xprt;
222 struct rpc_msg *msg;
223 {
224 struct svc_dg_data *su = su_data(xprt);
225 XDR *xdrs = &(su->su_xdrs);
226 bool_t stat = FALSE;
227 size_t slen;
228
229 xdrs->x_op = XDR_ENCODE;
230 XDR_SETPOS(xdrs, 0);
231 msg->rm_xid = su->su_xid;
232 if (xdr_replymsg(xdrs, msg)) {
233 slen = XDR_GETPOS(xdrs);
234 if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
235 (struct sockaddr *)xprt->xp_rtaddr.buf,
236 (socklen_t)xprt->xp_rtaddr.len) == slen) {
237 stat = TRUE;
238 if (su->su_cache && slen >= 0)
239 cache_set(xprt, slen);
240 }
241 }
242 return (stat);
243 }
244
245 static bool_t
246 svc_dg_getargs(xprt, xdr_args, args_ptr)
247 SVCXPRT *xprt;
248 xdrproc_t xdr_args;
249 caddr_t args_ptr;
250 {
251 return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
252 }
253
254 static bool_t
255 svc_dg_freeargs(xprt, xdr_args, args_ptr)
256 SVCXPRT *xprt;
257 xdrproc_t xdr_args;
258 caddr_t args_ptr;
259 {
260 register XDR *xdrs = &(su_data(xprt)->su_xdrs);
261
262 xdrs->x_op = XDR_FREE;
263 return (*xdr_args)(xdrs, args_ptr);
264 }
265
266 static void
267 svc_dg_destroy(xprt)
268 register SVCXPRT *xprt;
269 {
270 register struct svc_dg_data *su = su_data(xprt);
271
272 xprt_unregister(xprt);
273 if (xprt->xp_fd != -1)
274 (void)close(xprt->xp_fd);
275 XDR_DESTROY(&(su->su_xdrs));
276 (void) mem_free(rpc_buffer(xprt), su->su_iosz);
277 (void) mem_free((caddr_t)su, sizeof (*su));
278 if (xprt->xp_rtaddr.buf)
279 (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
280 if (xprt->xp_ltaddr.buf)
281 (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
282 if (xprt->xp_tp)
283 (void) free(xprt->xp_tp);
284 (void) mem_free((caddr_t)xprt, sizeof (SVCXPRT));
285 }
286
287 static bool_t
288 svc_dg_control(xprt, rq, in)
289 SVCXPRT *xprt;
290 const u_int rq;
291 void *in;
292 {
293 return (FALSE);
294 }
295
/*
 * Point `xprt' at the shared, lazily initialized ops vectors for
 * datagram transports.  The static `ops'/`ops2' tables are filled in on
 * the first call, under ops_lock.
 * NOTE(review): mutex_lock/mutex_unlock presumably compile to no-ops
 * when __REENT is not defined (see "reentrant.h") -- confirm.
 */
static void
svc_dg_ops(xprt)
	SVCXPRT *xprt;
{
	static struct xp_ops ops;
	static struct xp_ops2 ops2;
#ifdef __REENT
	extern mutex_t ops_lock;
#endif

/* VARIABLES PROTECTED BY ops_lock: ops */

	mutex_lock(&ops_lock);
	if (ops.xp_recv == NULL) {	/* first call: populate the tables */
		ops.xp_recv = svc_dg_recv;
		ops.xp_stat = svc_dg_stat;
		ops.xp_getargs = svc_dg_getargs;
		ops.xp_reply = svc_dg_reply;
		ops.xp_freeargs = svc_dg_freeargs;
		ops.xp_destroy = svc_dg_destroy;
		ops2.xp_control = svc_dg_control;
	}
	xprt->xp_ops = &ops;
	xprt->xp_ops2 = &ops2;
	mutex_unlock(&ops_lock);
}
322
/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the client handle.
 *
 * Fifo cache for cl server
 * Copies pointers to reply buffers into fifo cache
 * Buffers are sent again if retransmissions are detected.
 */

/* Hash table has SPARSENESS * uc_size buckets, i.e. 75% of slots empty. */
#define SPARSENESS 4 /* 75% sparse */

/* Typed wrappers around the rpc mem_alloc/mem_free allocators. */
#define ALLOC(type, size) \
	(type *) mem_alloc((unsigned) (sizeof (type) * (size)))

#define MEMZERO(addr, type, size) \
	(void) memset((char *) (addr), 0, sizeof (type) * (int) (size))

#define FREE(addr, type, size) \
	mem_free((char *) (addr), (sizeof (type) * (size)))
344
/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
	/*
	 * Index into cache is xid, proc, vers, prog and address
	 */
	u_int32_t cache_xid;		/* transaction id of the cached call */
	rpcproc_t cache_proc;		/* procedure number of the call */
	rpcvers_t cache_vers;		/* program version of the call */
	rpcprog_t cache_prog;		/* program number of the call */
	struct netbuf cache_addr;	/* caller's address (owned copy) */
	/*
	 * The cached reply and length
	 */
	char *cache_reply;		/* serialized reply (owned buffer) */
	size_t cache_replylen;		/* number of valid bytes in it */
	/*
	 * Next node on the list, if there is a collision
	 */
	cache_ptr cache_next;		/* hash-bucket chain link */
};
368
/*
 * The entire cache
 */
struct cl_cache {
	u_int uc_size;		/* size of cache */
	cache_ptr *uc_entries;	/* hash table of entries in cache */
	cache_ptr *uc_fifo;	/* fifo list of entries in cache */
	u_int uc_nextvictim;	/* points to next victim in fifo list */
	/* RPC parameters of the last uncached call, saved by cache_get()
	 * so cache_set() can file the reply under them. */
	rpcprog_t uc_prog;	/* saved program number */
	rpcvers_t uc_vers;	/* saved version number */
	rpcproc_t uc_proc;	/* saved procedure number */
};
381

/*
 * the hashing function: bucket index for a given xid.
 * NB: divides by uc_size, so the cache size must be non-zero.
 */
#define CACHE_LOC(transp, xid) \
	(xid % (SPARSENESS * ((struct cl_cache *) \
	su_data(transp)->su_cache)->uc_size))

#ifdef __REENT
/* Serializes all access to a transport's duplicate-request cache. */
extern mutex_t dupreq_lock;
#endif

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";
401
402 int
403 svc_dg_enablecache(transp, size)
404 SVCXPRT *transp;
405 u_int size;
406 {
407 struct svc_dg_data *su = su_data(transp);
408 struct cl_cache *uc;
409
410 mutex_lock(&dupreq_lock);
411 if (su->su_cache != NULL) {
412 (void) warnx(cache_enable_str, enable_err, " ");
413 mutex_unlock(&dupreq_lock);
414 return (0);
415 }
416 uc = ALLOC(struct cl_cache, 1);
417 if (uc == NULL) {
418 warnx(cache_enable_str, alloc_err, " ");
419 mutex_unlock(&dupreq_lock);
420 return (0);
421 }
422 uc->uc_size = size;
423 uc->uc_nextvictim = 0;
424 uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
425 if (uc->uc_entries == NULL) {
426 warnx(cache_enable_str, alloc_err, "data");
427 FREE(uc, struct cl_cache, 1);
428 mutex_unlock(&dupreq_lock);
429 return (0);
430 }
431 MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
432 uc->uc_fifo = ALLOC(cache_ptr, size);
433 if (uc->uc_fifo == NULL) {
434 warnx(cache_enable_str, alloc_err, "fifo");
435 FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
436 FREE(uc, struct cl_cache, 1);
437 mutex_unlock(&dupreq_lock);
438 return (0);
439 }
440 MEMZERO(uc->uc_fifo, cache_ptr, size);
441 su->su_cache = (char *) uc;
442 mutex_unlock(&dupreq_lock);
443 return (1);
444 }
445
/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv and cache_set() is
 * called by svc_dg_reply().  All this hoopla because the right RPC
 * parameters are not available at svc_dg_reply time.
 */
453
454 static const char cache_set_str[] = "cache_set: %s";
455 static const char cache_set_err1[] = "victim not found";
456 static const char cache_set_err2[] = "victim alloc failed";
457 static const char cache_set_err3[] = "could not allocate new rpc buffer";
458
459 static void
460 cache_set(xprt, replylen)
461 SVCXPRT *xprt;
462 size_t replylen;
463 {
464 register cache_ptr victim;
465 register cache_ptr *vicp;
466 register struct svc_dg_data *su = su_data(xprt);
467 struct cl_cache *uc = (struct cl_cache *) su->su_cache;
468 u_int loc;
469 char *newbuf;
470 #ifdef RPC_CACHE_DEBUG
471 struct netconfig *nconf;
472 char *uaddr;
473 #endif
474
475 mutex_lock(&dupreq_lock);
476 /*
477 * Find space for the new entry, either by
478 * reusing an old entry, or by mallocing a new one
479 */
480 victim = uc->uc_fifo[uc->uc_nextvictim];
481 if (victim != NULL) {
482 loc = CACHE_LOC(xprt, victim->cache_xid);
483 for (vicp = &uc->uc_entries[loc];
484 *vicp != NULL && *vicp != victim;
485 vicp = &(*vicp)->cache_next)
486 ;
487 if (*vicp == NULL) {
488 warnx(cache_set_str, cache_set_err1);
489 mutex_unlock(&dupreq_lock);
490 return;
491 }
492 *vicp = victim->cache_next; /* remove from cache */
493 newbuf = victim->cache_reply;
494 } else {
495 victim = ALLOC(struct cache_node, 1);
496 if (victim == NULL) {
497 warnx(cache_set_str, cache_set_err2);
498 mutex_unlock(&dupreq_lock);
499 return;
500 }
501 newbuf = (char *)mem_alloc(su->su_iosz);
502 if (newbuf == NULL) {
503 warnx(cache_set_str, cache_set_err3);
504 FREE(victim, struct cache_node, 1);
505 mutex_unlock(&dupreq_lock);
506 return;
507 }
508 }
509
510 /*
511 * Store it away
512 */
513 #ifdef RPC_CACHE_DEBUG
514 if (nconf = getnetconfigent(xprt->xp_netid)) {
515 uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
516 freenetconfigent(nconf);
517 printf(
518 "cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
519 su->su_xid, uc->uc_prog, uc->uc_vers,
520 uc->uc_proc, uaddr);
521 free(uaddr);
522 }
523 #endif
524 victim->cache_replylen = replylen;
525 victim->cache_reply = rpc_buffer(xprt);
526 rpc_buffer(xprt) = newbuf;
527 xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
528 su->su_iosz, XDR_ENCODE);
529 victim->cache_xid = su->su_xid;
530 victim->cache_proc = uc->uc_proc;
531 victim->cache_vers = uc->uc_vers;
532 victim->cache_prog = uc->uc_prog;
533 victim->cache_addr = xprt->xp_rtaddr;
534 victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
535 (void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
536 (int)xprt->xp_rtaddr.len);
537 loc = CACHE_LOC(xprt, victim->cache_xid);
538 victim->cache_next = uc->uc_entries[loc];
539 uc->uc_entries[loc] = victim;
540 uc->uc_fifo[uc->uc_nextvictim++] = victim;
541 uc->uc_nextvictim %= uc->uc_size;
542 mutex_unlock(&dupreq_lock);
543 }
544
/*
 * Try to get an entry from the cache
 * return 1 if found, 0 if not found and set the stage for cache_set()
 *
 * A hit means the current request (identified by su_xid plus the call's
 * prog/vers/proc and the sender's address) was already answered; the
 * caller (svc_dg_recv) resends *replyp/*replylenp instead of dispatching.
 * On a miss the call's prog/vers/proc are stashed in the cache so the
 * later cache_set() from svc_dg_reply() can file the reply correctly.
 */
static int
cache_get(xprt, msg, replyp, replylenp)
	SVCXPRT *xprt;
	struct rpc_msg *msg;
	char **replyp;		/* out: cached serialized reply */
	size_t *replylenp;	/* out: its length in bytes */
{
	u_int loc;
	register cache_ptr ent;
	register struct svc_dg_data *su = su_data(xprt);
	register struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
	struct netconfig *nconf;
	char *uaddr;
#endif

	mutex_lock(&dupreq_lock);
	loc = CACHE_LOC(xprt, su->su_xid);
	/* Walk the hash bucket; an entry matches only if xid, RPC triple,
	 * and the full caller address all agree (len checked before buf). */
	for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
		if (ent->cache_xid == su->su_xid &&
			ent->cache_proc == msg->rm_call.cb_proc &&
			ent->cache_vers == msg->rm_call.cb_vers &&
			ent->cache_prog == msg->rm_call.cb_prog &&
			ent->cache_addr.len == xprt->xp_rtaddr.len &&
			(memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
				xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
			if (nconf = getnetconfigent(xprt->xp_netid)) {
				uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
				freenetconfigent(nconf);
				printf(
	"cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
					su->su_xid, msg->rm_call.cb_prog,
					msg->rm_call.cb_vers,
					msg->rm_call.cb_proc, uaddr);
				free(uaddr);
			}
#endif
			*replyp = ent->cache_reply;
			*replylenp = ent->cache_replylen;
			mutex_unlock(&dupreq_lock);
			return (1);
		}
	}
	/*
	 * Failed to find entry
	 * Remember a few things so we can do a set later
	 */
	uc->uc_proc = msg->rm_call.cb_proc;
	uc->uc_vers = msg->rm_call.cb_vers;
	uc->uc_prog = msg->rm_call.cb_prog;
	mutex_unlock(&dupreq_lock);
	return (0);
}
603