/*	$NetBSD: svc_dg.c,v 1.4 2000/07/06 03:10:35 christos Exp $	*/

/*
 * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
 * unrestricted use provided that this legend is included on all tape
 * media and as a part of the software program in whole or part. Users
 * may copy or modify Sun RPC without charge, but are not authorized
 * to license or distribute it to anyone else except as part of a product or
 * program developed by the user.
 *
 * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
 * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
 *
 * Sun RPC is provided with no support and without any obligation on the
 * part of Sun Microsystems, Inc. to assist in its use, correction,
 * modification or enhancement.
 *
 * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
 * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
 * OR ANY PART THEREOF.
 *
 * In no event will Sun Microsystems, Inc. be liable for any lost revenue
 * or profits or other special, indirect and consequential damages, even if
 * Sun has been advised of the possibility of such damages.
 *
 * Sun Microsystems, Inc.
 * 2550 Garcia Avenue
 * Mountain View, California 94043
 */

/*
 * Copyright (c) 1986-1991 by Sun Microsystems Inc.
 */

/* #ident "@(#)svc_dg.c 1.17 94/04/24 SMI" */


/*
 * svc_dg.c, Server side for connectionless RPC.
 *
 * Does some caching in the hopes of achieving execute-at-most-once semantics.
 */

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/socket.h>
#include <rpc/rpc.h>
#include <errno.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef RPC_CACHE_DEBUG
#include <netconfig.h>
#include <netdir.h>
#endif
#include <err.h>

#include "rpc_com.h"
#include "svc_dg.h"

#define su_data(xprt)	((struct svc_dg_data *)(xprt->xp_p2))
#define rpc_buffer(xprt) ((xprt)->xp_p1)

#ifdef __weak_alias
__weak_alias(svc_dg_create,_svc_dg_create)
#endif

#ifndef MAX
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#endif

static void svc_dg_ops __P((SVCXPRT *));
static enum xprt_stat svc_dg_stat __P((SVCXPRT *));
static bool_t svc_dg_recv __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_reply __P((SVCXPRT *, struct rpc_msg *));
static bool_t svc_dg_getargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static bool_t svc_dg_freeargs __P((SVCXPRT *, xdrproc_t, caddr_t));
static void svc_dg_destroy __P((SVCXPRT *));
static bool_t svc_dg_control __P((SVCXPRT *, const u_int, void *));
static int cache_get __P((SVCXPRT *, struct rpc_msg *, char **, size_t *));
static void cache_set __P((SVCXPRT *, size_t));
int svc_dg_enablecache __P((SVCXPRT *, u_int));

/*
 * Usage:
 *	xprt = svc_dg_create(sock, sendsize, recvsize);
 * Does other connectionless-specific initializations.
 * Once *xprt is initialized, it is registered; see (svc.h, xprt_register).
 * If recvsize or sendsize are 0, suitable system defaults are chosen.
 * The routine returns NULL if a problem occurred.
 */
static const char svc_dg_str[] = "svc_dg_create: %s";
static const char svc_dg_err1[] = "could not get transport information";
static const char svc_dg_err2[] = " transport does not support data transfer";
static const char __no_mem_str[] = "out of memory";

SVCXPRT *
svc_dg_create(fd, sendsize, recvsize)
        int fd;
        u_int sendsize;
        u_int recvsize;
{
        SVCXPRT *xprt;
        struct svc_dg_data *su = NULL;
        struct __rpc_sockinfo si;
        struct sockaddr_storage ss;
        socklen_t slen;

        if (!__rpc_fd2sockinfo(fd, &si)) {
                warnx(svc_dg_str, svc_dg_err1);
                return (NULL);
        }
        /*
         * Find the receive and the send size
         */
        sendsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsize);
        recvsize = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsize);
        if ((sendsize == 0) || (recvsize == 0)) {
                warnx(svc_dg_str, svc_dg_err2);
                return (NULL);
        }

        xprt = mem_alloc(sizeof (SVCXPRT));
        if (xprt == NULL)
                goto freedata;
        memset(xprt, 0, sizeof (SVCXPRT));

        su = mem_alloc(sizeof (*su));
        if (su == NULL)
                goto freedata;
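        /*
         * The I/O buffer size is rounded up to a multiple of four bytes,
         * the XDR encoding unit.
         */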
        su->su_iosz = ((MAX(sendsize, recvsize) + 3) / 4) * 4;
        if ((rpc_buffer(xprt) = mem_alloc(su->su_iosz)) == NULL)
                goto freedata;
        xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt), su->su_iosz,
            XDR_DECODE);
        su->su_cache = NULL;
        xprt->xp_fd = fd;
        xprt->xp_p2 = (caddr_t)(void *)su;
        xprt->xp_verf.oa_base = su->su_verfbody;
        svc_dg_ops(xprt);
        xprt->xp_rtaddr.maxlen = sizeof (struct sockaddr_storage);

        slen = sizeof ss;
        if (getsockname(fd, (struct sockaddr *)(void *)&ss, &slen) < 0)
                goto freedata;
        xprt->xp_ltaddr.buf = mem_alloc(sizeof (struct sockaddr_storage));
        xprt->xp_ltaddr.maxlen = sizeof (struct sockaddr_storage);
        xprt->xp_ltaddr.len = slen;
        memcpy(xprt->xp_ltaddr.buf, &ss, slen);

        xprt_register(xprt);
        return (xprt);
freedata:
        (void) warnx(svc_dg_str, __no_mem_str);
        if (xprt) {
                if (su)
                        (void) mem_free(su, sizeof (*su));
                (void) mem_free(xprt, sizeof (SVCXPRT));
        }
        return (NULL);
}
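
/*
 * Example (illustrative sketch only, not part of this file's interfaces):
 * a typical connectionless server creates and binds a datagram socket,
 * wraps it with svc_dg_create(), registers a dispatch routine and enters
 * the svc_run() loop.  Passing 0 for the buffer sizes asks for system
 * defaults, and a NULL netconfig to svc_reg() skips the rpcbind
 * registration.  EXAMPLE_PROG, EXAMPLE_VERS and example_dispatch() are
 * placeholder names, not symbols defined by the RPC library.
 *
 *	#include <rpc/rpc.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <string.h>
 *	#include <err.h>
 *
 *	static void example_dispatch(struct svc_req *, SVCXPRT *);
 *
 *	static void
 *	example_server(void)
 *	{
 *		struct sockaddr_in sin;
 *		SVCXPRT *xprt;
 *		int sock;
 *
 *		sock = socket(AF_INET, SOCK_DGRAM, 0);
 *		if (sock == -1)
 *			err(1, "socket");
 *
 *		memset(&sin, 0, sizeof sin);
 *		sin.sin_family = AF_INET;
 *		sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *		sin.sin_port = 0;
 *		if (bind(sock, (struct sockaddr *)&sin, sizeof sin) == -1)
 *			err(1, "bind");
 *
 *		xprt = svc_dg_create(sock, 0, 0);
 *		if (xprt == NULL)
 *			errx(1, "svc_dg_create failed");
 *		if (!svc_reg(xprt, EXAMPLE_PROG, EXAMPLE_VERS,
 *		    example_dispatch, NULL))
 *			errx(1, "svc_reg failed");
 *		svc_run();
 *	}
 */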

/*ARGSUSED*/
static enum xprt_stat
svc_dg_stat(xprt)
        SVCXPRT *xprt;
{
        return (XPRT_IDLE);
}

static bool_t
svc_dg_recv(xprt, msg)
        SVCXPRT *xprt;
        struct rpc_msg *msg;
{
        struct svc_dg_data *su = su_data(xprt);
        XDR *xdrs = &(su->su_xdrs);
        char *reply;
        struct sockaddr_storage ss;
        socklen_t alen;
        size_t replylen;
        int rlen;

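        /*
         * Read the next datagram; the recvfrom() is restarted if it is
         * interrupted by a signal.
         */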
again:
        alen = sizeof (struct sockaddr_storage);
        rlen = recvfrom(xprt->xp_fd, rpc_buffer(xprt), su->su_iosz, 0,
            (struct sockaddr *)(void *)&ss, &alen);
        if (rlen == -1 && errno == EINTR)
                goto again;
        if (rlen == -1 || (rlen < 4 * sizeof (u_int32_t)))
                return (FALSE);
        if (xprt->xp_rtaddr.len < alen) {
                if (xprt->xp_rtaddr.len != 0)
                        mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.len);
                xprt->xp_rtaddr.buf = mem_alloc(alen);
                xprt->xp_rtaddr.len = alen;
        }
        memcpy(xprt->xp_rtaddr.buf, &ss, alen);
#ifdef PORTMAP
        if (ss.ss_family == AF_INET) {
                xprt->xp_raddr = *(struct sockaddr_in *)xprt->xp_rtaddr.buf;
                xprt->xp_addrlen = sizeof (struct sockaddr_in);
        }
#endif
        xdrs->x_op = XDR_DECODE;
        XDR_SETPOS(xdrs, 0);
        if (! xdr_callmsg(xdrs, msg)) {
                return (FALSE);
        }
        su->su_xid = msg->rm_xid;
        if (su->su_cache != NULL) {
                if (cache_get(xprt, msg, &reply, &replylen)) {
                        (void)sendto(xprt->xp_fd, reply, replylen, 0,
                            (struct sockaddr *)(void *)&ss, alen);
                        return (FALSE);
                }
        }
        return (TRUE);
}

static bool_t
svc_dg_reply(xprt, msg)
        SVCXPRT *xprt;
        struct rpc_msg *msg;
{
        struct svc_dg_data *su = su_data(xprt);
        XDR *xdrs = &(su->su_xdrs);
        bool_t stat = FALSE;
        size_t slen;

        xdrs->x_op = XDR_ENCODE;
        XDR_SETPOS(xdrs, 0);
        msg->rm_xid = su->su_xid;
        if (xdr_replymsg(xdrs, msg)) {
                slen = XDR_GETPOS(xdrs);
                if (sendto(xprt->xp_fd, rpc_buffer(xprt), slen, 0,
                    (struct sockaddr *)xprt->xp_rtaddr.buf,
                    (socklen_t)xprt->xp_rtaddr.len) == slen) {
                        stat = TRUE;
                        if (su->su_cache)
                                cache_set(xprt, slen);
                }
        }
        return (stat);
}

static bool_t
svc_dg_getargs(xprt, xdr_args, args_ptr)
        SVCXPRT *xprt;
        xdrproc_t xdr_args;
        caddr_t args_ptr;
{
        return (*xdr_args)(&(su_data(xprt)->su_xdrs), args_ptr);
}

static bool_t
svc_dg_freeargs(xprt, xdr_args, args_ptr)
        SVCXPRT *xprt;
        xdrproc_t xdr_args;
        caddr_t args_ptr;
{
        XDR *xdrs = &(su_data(xprt)->su_xdrs);

        xdrs->x_op = XDR_FREE;
        return (*xdr_args)(xdrs, args_ptr);
}

static void
svc_dg_destroy(xprt)
        SVCXPRT *xprt;
{
        struct svc_dg_data *su = su_data(xprt);

        xprt_unregister(xprt);
        if (xprt->xp_fd != -1)
                (void)close(xprt->xp_fd);
        XDR_DESTROY(&(su->su_xdrs));
        (void) mem_free(rpc_buffer(xprt), su->su_iosz);
        (void) mem_free(su, sizeof (*su));
        if (xprt->xp_rtaddr.buf)
                (void) mem_free(xprt->xp_rtaddr.buf, xprt->xp_rtaddr.maxlen);
        if (xprt->xp_ltaddr.buf)
                (void) mem_free(xprt->xp_ltaddr.buf, xprt->xp_ltaddr.maxlen);
        if (xprt->xp_tp)
                (void) free(xprt->xp_tp);
        (void) mem_free(xprt, sizeof (SVCXPRT));
}

static bool_t
/*ARGSUSED*/
svc_dg_control(xprt, rq, in)
        SVCXPRT *xprt;
        const u_int rq;
        void *in;
{
        return (FALSE);
}

static void
svc_dg_ops(xprt)
        SVCXPRT *xprt;
{
        static struct xp_ops ops;
        static struct xp_ops2 ops2;
#ifdef __REENT
        extern mutex_t ops_lock;
#endif

/* VARIABLES PROTECTED BY ops_lock: ops */

        mutex_lock(&ops_lock);
        if (ops.xp_recv == NULL) {
                ops.xp_recv = svc_dg_recv;
                ops.xp_stat = svc_dg_stat;
                ops.xp_getargs = svc_dg_getargs;
                ops.xp_reply = svc_dg_reply;
                ops.xp_freeargs = svc_dg_freeargs;
                ops.xp_destroy = svc_dg_destroy;
                ops2.xp_control = svc_dg_control;
        }
        xprt->xp_ops = &ops;
        xprt->xp_ops2 = &ops2;
        mutex_unlock(&ops_lock);
}

/* The CACHING COMPONENT */

/*
 * Could have been a separate file, but some part of it depends upon the
 * private structure of the transport handle.
 *
 * Fifo cache for the connectionless (cl) server.
 * Copies pointers to reply buffers into the fifo cache.
 * Buffers are sent again if retransmissions are detected.
 */

#define SPARSENESS 4	/* 75% sparse */

#define ALLOC(type, size)	\
        (type *) mem_alloc((sizeof (type) * (size)))

#define MEMZERO(addr, type, size)	\
        (void) memset((void *) (addr), 0, sizeof (type) * (int) (size))

#define FREE(addr, type, size)	\
        mem_free((addr), (sizeof (type) * (size)))

/*
 * An entry in the cache
 */
typedef struct cache_node *cache_ptr;
struct cache_node {
        /*
         * Index into cache is xid, proc, vers, prog and address
         */
        u_int32_t cache_xid;
        rpcproc_t cache_proc;
        rpcvers_t cache_vers;
        rpcprog_t cache_prog;
        struct netbuf cache_addr;
        /*
         * The cached reply and length
         */
        char *cache_reply;
        size_t cache_replylen;
        /*
         * Next node on the list, if there is a collision
         */
        cache_ptr cache_next;
};

/*
 * The entire cache
 */
struct cl_cache {
        u_int uc_size;		/* size of cache */
        cache_ptr *uc_entries;	/* hash table of entries in cache */
        cache_ptr *uc_fifo;	/* fifo list of entries in cache */
        u_int uc_nextvictim;	/* points to next victim in fifo list */
        rpcprog_t uc_prog;	/* saved program number */
        rpcvers_t uc_vers;	/* saved version number */
        rpcproc_t uc_proc;	/* saved procedure number */
};


/*
 * the hashing function
 */
#define CACHE_LOC(transp, xid)	\
        (xid % (SPARSENESS * ((struct cl_cache *) \
                su_data(transp)->su_cache)->uc_size))
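
/*
 * For illustration (values chosen arbitrarily): with a cache created by
 * svc_dg_enablecache(xprt, 64), uc_size is 64, so CACHE_LOC() spreads an
 * xid over SPARSENESS * 64 = 256 hash buckets; e.g. xid 0x12345 hashes to
 * bucket 0x12345 % 256 = 0x45.
 */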

#ifdef __REENT
extern mutex_t dupreq_lock;
#endif

/*
 * Enable use of the cache. Returns 1 on success, 0 on failure.
 * Note: there is no disable.
 */
static const char cache_enable_str[] = "svc_enablecache: %s %s";
static const char alloc_err[] = "could not allocate cache ";
static const char enable_err[] = "cache already enabled";

int
svc_dg_enablecache(transp, size)
        SVCXPRT *transp;
        u_int size;
{
        struct svc_dg_data *su = su_data(transp);
        struct cl_cache *uc;

        mutex_lock(&dupreq_lock);
        if (su->su_cache != NULL) {
                (void) warnx(cache_enable_str, enable_err, " ");
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        uc = ALLOC(struct cl_cache, 1);
        if (uc == NULL) {
                warnx(cache_enable_str, alloc_err, " ");
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        uc->uc_size = size;
        uc->uc_nextvictim = 0;
        uc->uc_entries = ALLOC(cache_ptr, size * SPARSENESS);
        if (uc->uc_entries == NULL) {
                warnx(cache_enable_str, alloc_err, "data");
                FREE(uc, struct cl_cache, 1);
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        MEMZERO(uc->uc_entries, cache_ptr, size * SPARSENESS);
        uc->uc_fifo = ALLOC(cache_ptr, size);
        if (uc->uc_fifo == NULL) {
                warnx(cache_enable_str, alloc_err, "fifo");
                FREE(uc->uc_entries, cache_ptr, size * SPARSENESS);
                FREE(uc, struct cl_cache, 1);
                mutex_unlock(&dupreq_lock);
                return (0);
        }
        MEMZERO(uc->uc_fifo, cache_ptr, size);
        su->su_cache = (char *)(void *)uc;
        mutex_unlock(&dupreq_lock);
        return (1);
}
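
/*
 * Example (illustrative sketch only): a server that wants the
 * duplicate-request cache turns it on once, right after the transport is
 * created.  The cache size of 64 entries below is an arbitrary value
 * chosen for the example, not a recommended default.
 *
 *	SVCXPRT *xprt;
 *
 *	xprt = svc_dg_create(sock, 0, 0);
 *	if (xprt == NULL)
 *		errx(1, "svc_dg_create failed");
 *	if (!svc_dg_enablecache(xprt, 64))
 *		warnx("could not enable the duplicate-request cache");
 */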

/*
 * Set an entry in the cache.  It assumes that the uc entry is set from
 * the earlier call to cache_get() for the same procedure.  This will always
 * happen because cache_get() is called by svc_dg_recv() and cache_set() is
 * called by svc_dg_reply().  All this hoopla is needed because the right
 * RPC parameters are not available at svc_dg_reply() time.
 */

static const char cache_set_str[] = "cache_set: %s";
static const char cache_set_err1[] = "victim not found";
static const char cache_set_err2[] = "victim alloc failed";
static const char cache_set_err3[] = "could not allocate new rpc buffer";

static void
cache_set(xprt, replylen)
        SVCXPRT *xprt;
        size_t replylen;
{
        cache_ptr victim;
        cache_ptr *vicp;
        struct svc_dg_data *su = su_data(xprt);
        struct cl_cache *uc = (struct cl_cache *) su->su_cache;
        u_int loc;
        char *newbuf;
#ifdef RPC_CACHE_DEBUG
        struct netconfig *nconf;
        char *uaddr;
#endif

        mutex_lock(&dupreq_lock);
        /*
         * Find space for the new entry, either by
         * reusing an old entry, or by mallocing a new one
         */
        victim = uc->uc_fifo[uc->uc_nextvictim];
        if (victim != NULL) {
                loc = CACHE_LOC(xprt, victim->cache_xid);
                for (vicp = &uc->uc_entries[loc];
                    *vicp != NULL && *vicp != victim;
                    vicp = &(*vicp)->cache_next)
                        ;
                if (*vicp == NULL) {
                        warnx(cache_set_str, cache_set_err1);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
                *vicp = victim->cache_next;	/* remove from cache */
                newbuf = victim->cache_reply;
        } else {
                victim = ALLOC(struct cache_node, 1);
                if (victim == NULL) {
                        warnx(cache_set_str, cache_set_err2);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
                newbuf = mem_alloc(su->su_iosz);
                if (newbuf == NULL) {
                        warnx(cache_set_str, cache_set_err3);
                        FREE(victim, struct cache_node, 1);
                        mutex_unlock(&dupreq_lock);
                        return;
                }
        }

        /*
         * Store it away
         */
#ifdef RPC_CACHE_DEBUG
        if (nconf = getnetconfigent(xprt->xp_netid)) {
                uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
                freenetconfigent(nconf);
                printf(
        "cache set for xid= %x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
                    su->su_xid, uc->uc_prog, uc->uc_vers,
                    uc->uc_proc, uaddr);
                free(uaddr);
        }
#endif
        victim->cache_replylen = replylen;
        victim->cache_reply = rpc_buffer(xprt);
        rpc_buffer(xprt) = newbuf;
        xdrmem_create(&(su->su_xdrs), rpc_buffer(xprt),
            su->su_iosz, XDR_ENCODE);
        victim->cache_xid = su->su_xid;
        victim->cache_proc = uc->uc_proc;
        victim->cache_vers = uc->uc_vers;
        victim->cache_prog = uc->uc_prog;
        victim->cache_addr = xprt->xp_rtaddr;
        victim->cache_addr.buf = ALLOC(char, xprt->xp_rtaddr.len);
        (void) memcpy(victim->cache_addr.buf, xprt->xp_rtaddr.buf,
            (size_t)xprt->xp_rtaddr.len);
        loc = CACHE_LOC(xprt, victim->cache_xid);
        victim->cache_next = uc->uc_entries[loc];
        uc->uc_entries[loc] = victim;
        uc->uc_fifo[uc->uc_nextvictim++] = victim;
        uc->uc_nextvictim %= uc->uc_size;
        mutex_unlock(&dupreq_lock);
}

/*
 * Try to get an entry from the cache.
 * Return 1 if found; return 0 if not found, and set the stage for
 * cache_set().
 */
static int
cache_get(xprt, msg, replyp, replylenp)
        SVCXPRT *xprt;
        struct rpc_msg *msg;
        char **replyp;
        size_t *replylenp;
{
        u_int loc;
        cache_ptr ent;
        struct svc_dg_data *su = su_data(xprt);
        struct cl_cache *uc = (struct cl_cache *) su->su_cache;
#ifdef RPC_CACHE_DEBUG
        struct netconfig *nconf;
        char *uaddr;
#endif

        mutex_lock(&dupreq_lock);
        loc = CACHE_LOC(xprt, su->su_xid);
        for (ent = uc->uc_entries[loc]; ent != NULL; ent = ent->cache_next) {
                if (ent->cache_xid == su->su_xid &&
                    ent->cache_proc == msg->rm_call.cb_proc &&
                    ent->cache_vers == msg->rm_call.cb_vers &&
                    ent->cache_prog == msg->rm_call.cb_prog &&
                    ent->cache_addr.len == xprt->xp_rtaddr.len &&
                    (memcmp(ent->cache_addr.buf, xprt->xp_rtaddr.buf,
                        xprt->xp_rtaddr.len) == 0)) {
#ifdef RPC_CACHE_DEBUG
                        if (nconf = getnetconfigent(xprt->xp_netid)) {
                                uaddr = taddr2uaddr(nconf, &xprt->xp_rtaddr);
                                freenetconfigent(nconf);
                                printf(
        "cache entry found for xid=%x prog=%d vers=%d proc=%d for rmtaddr=%s\n",
                                    su->su_xid, msg->rm_call.cb_prog,
                                    msg->rm_call.cb_vers,
                                    msg->rm_call.cb_proc, uaddr);
                                free(uaddr);
                        }
#endif
                        *replyp = ent->cache_reply;
                        *replylenp = ent->cache_replylen;
                        mutex_unlock(&dupreq_lock);
                        return (1);
                }
        }
        /*
         * Failed to find entry
         * Remember a few things so we can do a set later
         */
        uc->uc_proc = msg->rm_call.cb_proc;
        uc->uc_vers = msg->rm_call.cb_vers;
        uc->uc_prog = msg->rm_call.cb_prog;
        mutex_unlock(&dupreq_lock);
        return (0);
}