/*	$NetBSD: clnt_vc.c,v 1.17.8.2 2013/03/31 20:26:33 riz Exp $	*/

/*
 * Copyright (c) 2010, Oracle America, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials
 *       provided with the distribution.
 *     * Neither the name of the "Oracle America, Inc." nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *   FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *   COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 *   INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 *   DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 *   GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 *   WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#if defined(LIBC_SCCS) && !defined(lint)
#if 0
static char *sccsid = "@(#)clnt_tcp.c 1.37 87/10/05 Copyr 1984 Sun Micro";
static char *sccsid = "@(#)clnt_tcp.c 2.2 88/08/01 4.0 RPCSRC";
static char sccsid[] = "@(#)clnt_vc.c 1.19 89/03/16 Copyr 1988 Sun Micro";
#else
__RCSID("$NetBSD: clnt_vc.c,v 1.17.8.2 2013/03/31 20:26:33 riz Exp $");
#endif
#endif

/*
 * clnt_tcp.c, Implements a TCP/IP based, client side RPC.
 *
 * Copyright (C) 1984, Sun Microsystems, Inc.
 *
 * TCP based RPC supports 'batched calls'.
 * A sequence of calls may be batched up in a send buffer.  The rpc call
 * returns immediately to the client even though the call was not necessarily
 * sent.  The batching occurs if the results' xdr routine is NULL (0) AND
 * the rpc timeout value is zero (see clnt.h, rpc).
 *
 * Clients should NOT casually batch calls that in fact return results; that
 * is, the server side should be aware that a call is batched and not produce
 * any return message.  Batched calls that produce many result messages can
 * deadlock (netlock) the client and the server....
 */
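
/*
 * Illustrative sketch (not part of this library's code): a caller batches
 * requests by passing a NULL result xdrproc_t and a zero timeout to
 * clnt_call(), then flushes the pipeline with one ordinary, non-batched
 * call.  "clnt" is a handle obtained from clnt_vc_create(); BATCHPROC and
 * the string argument are hypothetical names used only for this example.
 *
 *	static const struct timeval zero = { 0, 0 };
 *	static const struct timeval wait = { 25, 0 };
 *	char *arg = "queued request";
 *
 *	(void)clnt_call(clnt, BATCHPROC, (xdrproc_t)xdr_wrapstring,
 *	    (char *)&arg, (xdrproc_t)NULL, NULL, zero);
 *	(void)clnt_call(clnt, NULLPROC, (xdrproc_t)xdr_void, NULL,
 *	    (xdrproc_t)xdr_void, NULL, wait);
 *
 * The first call returns RPC_SUCCESS immediately without touching the wire
 * (shipnow is FALSE in clnt_vc_call() below); the second call, with a
 * non-NULL result routine and a real timeout, flushes everything queued in
 * the xdrrec stream and waits for its own reply.
 */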

#include "namespace.h"
#include "reentrant.h"
#include <sys/types.h>
#include <sys/poll.h>
#include <sys/socket.h>

#include <assert.h>
#include <err.h>
#include <errno.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>

#include <rpc/rpc.h>

#include "rpc_internal.h"

#ifdef __weak_alias
__weak_alias(clnt_vc_create,_clnt_vc_create)
#endif

#define MCALL_MSG_SIZE 24

static enum clnt_stat clnt_vc_call __P((CLIENT *, rpcproc_t, xdrproc_t,
    const char *, xdrproc_t, caddr_t, struct timeval));
static void clnt_vc_geterr __P((CLIENT *, struct rpc_err *));
static bool_t clnt_vc_freeres __P((CLIENT *, xdrproc_t, caddr_t));
static void clnt_vc_abort __P((CLIENT *));
static bool_t clnt_vc_control __P((CLIENT *, u_int, char *));
static void clnt_vc_destroy __P((CLIENT *));
static struct clnt_ops *clnt_vc_ops __P((void));
static bool_t time_not_ok __P((struct timeval *));
static int read_vc __P((caddr_t, caddr_t, int));
static int write_vc __P((caddr_t, caddr_t, int));

struct ct_data {
	int		ct_fd;
	bool_t		ct_closeit;
	struct timeval	ct_wait;
	bool_t		ct_waitset;	/* wait set by clnt_control? */
	struct netbuf	ct_addr;
	struct rpc_err	ct_error;
	union {
		char	ct_mcallc[MCALL_MSG_SIZE];	/* marshalled callmsg */
		u_int32_t ct_mcalli;
	} ct_u;
	u_int		ct_mpos;	/* pos after marshal */
	XDR		ct_xdrs;
};
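
/*
 * Layout note: ct_mcallc holds the pre-serialized static part of the call
 * message, laid down by xdr_callhdr() in clnt_vc_create() as consecutive
 * 4-byte XDR units:
 *
 *	word 0	rm_xid		(aliased by ct_mcalli)
 *	word 1	rm_direction	(CALL)
 *	word 2	cb_rpcvers	(RPC_MSG_VERSION)
 *	word 3	cb_prog		(indexed by CLGET_PROG and CLSET_PROG)
 *	word 4	cb_vers		(indexed by CLGET_VERS and CLSET_VERS)
 *
 * The CLGET_ and CLSET_ cases in clnt_vc_control() rely on this layout.
 */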

/*
 * This machinery implements per-fd locks for MT-safety.  It is not
 * sufficient to do per-CLIENT handle locks for MT-safety because a
 * user may create more than one CLIENT handle with the same fd behind
 * it.  Therefore, we allocate an array of flags (vc_fd_locks), protected
 * by the clnt_fd_lock mutex, and an array (vc_cv) of condition variables
 * similarly protected.  vc_fd_locks[fd] == 1 => a call is active on some
 * CLIENT handle created for that fd.
 * The current implementation holds locks across the entire RPC and reply.
 * Yes, this is silly, and as soon as this code is proven to work, this
 * should be the first thing fixed.  One step at a time.
 */
#ifdef _REENTRANT
static int *vc_fd_locks;
#define __rpc_lock_value __isthreaded
extern mutex_t clnt_fd_lock;
static cond_t *vc_cv;
#define release_fd_lock(fd, mask) {	\
	mutex_lock(&clnt_fd_lock);	\
	vc_fd_locks[fd] = 0;		\
	mutex_unlock(&clnt_fd_lock);	\
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);	\
	cond_signal(&vc_cv[fd]);	\
}
#else
#define release_fd_lock(fd,mask)
#define __rpc_lock_value 0
#endif
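
/*
 * The acquire side of this scheme (used by clnt_vc_call(), clnt_vc_control()
 * and clnt_vc_destroy() below) pairs with release_fd_lock() roughly like
 * this:
 *
 *	sigfillset(&newmask);
 *	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
 *	mutex_lock(&clnt_fd_lock);
 *	while (vc_fd_locks[ct->ct_fd])
 *		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
 *	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
 *	mutex_unlock(&clnt_fd_lock);
 *	... the RPC exchange, serialized per fd ...
 *	release_fd_lock(ct->ct_fd, mask);
 */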

/*
 * Create a client handle for a connection.
 * Default options are set, which the user can change using clnt_control().
 * The rpc/vc package does buffering similar to stdio, so the client
 * must pick send and receive buffer sizes, 0 => use the default.
 * NB: fd is copied into a private area.
 * NB: The rpch->cl_auth is set to null authentication.  Callers may wish to
 * set this to something more useful.
 *
 * fd should be an open socket
 */
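/*
 * Illustrative sketch (not part of this library's code): creating a handle
 * over an already-connected TCP socket and letting clnt_vc_destroy() close
 * it.  PROGNUM, VERSNUM and "sin" (a struct sockaddr_in already filled in
 * with the server's address and port) are hypothetical values used only for
 * this example.
 *
 *	struct netbuf raddr;
 *	CLIENT *clnt;
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, 0);
 *	if (fd == -1 || connect(fd, (struct sockaddr *)&sin, sizeof sin) == -1)
 *		err(1, "connect");
 *	raddr.buf = &sin;
 *	raddr.len = raddr.maxlen = sizeof sin;
 *	clnt = clnt_vc_create(fd, &raddr, PROGNUM, VERSNUM, 0, 0);
 *	if (clnt == NULL)
 *		errx(1, "%s", clnt_spcreateerror("clnt_vc_create"));
 *	clnt_control(clnt, CLSET_FD_CLOSE, NULL);
 */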
CLIENT *
clnt_vc_create(fd, raddr, prog, vers, sendsz, recvsz)
	int fd;
	const struct netbuf *raddr;
	rpcprog_t prog;
	rpcvers_t vers;
	u_int sendsz;
	u_int recvsz;
{
	CLIENT *h;
	struct ct_data *ct = NULL;
	struct rpc_msg call_msg;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;
	struct sockaddr_storage ss;
	socklen_t slen;
	struct __rpc_sockinfo si;

	_DIAGASSERT(raddr != NULL);

	h = mem_alloc(sizeof(*h));
	if (h == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}
	ct = mem_alloc(sizeof(*ct));
	if (ct == NULL) {
		warnx("clnt_vc_create: out of memory");
		rpc_createerr.cf_stat = RPC_SYSTEMERROR;
		rpc_createerr.cf_error.re_errno = errno;
		goto fooy;
	}

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
#ifdef _REENTRANT
	mutex_lock(&clnt_fd_lock);
	if (vc_fd_locks == NULL) {
		size_t cv_allocsz, fd_allocsz;
		int dtbsize = __rpc_dtbsize();

		fd_allocsz = dtbsize * sizeof (int);
		vc_fd_locks = mem_alloc(fd_allocsz);
		if (vc_fd_locks == NULL) {
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto fooy;
		} else
			memset(vc_fd_locks, '\0', fd_allocsz);

		_DIAGASSERT(vc_cv == NULL);
		cv_allocsz = dtbsize * sizeof (cond_t);
		vc_cv = mem_alloc(cv_allocsz);
		if (vc_cv == NULL) {
			mem_free(vc_fd_locks, fd_allocsz);
			vc_fd_locks = NULL;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto fooy;
		} else {
			int i;

			for (i = 0; i < dtbsize; i++)
				cond_init(&vc_cv[i], 0, (void *) 0);
		}
	} else
		_DIAGASSERT(vc_cv != NULL);
#endif

	/*
	 * XXX - fvdl connecting while holding a mutex?
	 */
	slen = sizeof ss;
	if (getpeername(fd, (struct sockaddr *)(void *)&ss, &slen) < 0) {
		if (errno != ENOTCONN) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto fooy;
		}
		if (connect(fd, (struct sockaddr *)raddr->buf, raddr->len) < 0) {
			rpc_createerr.cf_stat = RPC_SYSTEMERROR;
			rpc_createerr.cf_error.re_errno = errno;
			mutex_unlock(&clnt_fd_lock);
			thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
			goto fooy;
		}
	}
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	if (!__rpc_fd2sockinfo(fd, &si))
		goto fooy;

	ct->ct_closeit = FALSE;

	/*
	 * Set up private data struct
	 */
	ct->ct_fd = fd;
	ct->ct_wait.tv_usec = 0;
	ct->ct_waitset = FALSE;
	ct->ct_addr.buf = malloc((size_t)raddr->maxlen);
	if (ct->ct_addr.buf == NULL)
		goto fooy;
	memcpy(ct->ct_addr.buf, raddr->buf, (size_t)raddr->len);
	ct->ct_addr.len = raddr->len;
	ct->ct_addr.maxlen = raddr->maxlen;

	/*
	 * Initialize call message
	 */
	call_msg.rm_xid = __RPC_GETXID();
	call_msg.rm_direction = CALL;
	call_msg.rm_call.cb_rpcvers = RPC_MSG_VERSION;
	call_msg.rm_call.cb_prog = (u_int32_t)prog;
	call_msg.rm_call.cb_vers = (u_int32_t)vers;

	/*
	 * pre-serialize the static part of the call msg and stash it away
	 */
	xdrmem_create(&(ct->ct_xdrs), ct->ct_u.ct_mcallc, MCALL_MSG_SIZE,
	    XDR_ENCODE);
	if (! xdr_callhdr(&(ct->ct_xdrs), &call_msg)) {
		if (ct->ct_closeit) {
			(void)close(fd);
		}
		goto fooy;
	}
	ct->ct_mpos = XDR_GETPOS(&(ct->ct_xdrs));
	XDR_DESTROY(&(ct->ct_xdrs));

	/*
	 * Create a client handle which uses xdrrec for serialization
	 * and authnone for authentication.
	 */
	h->cl_ops = clnt_vc_ops();
	h->cl_private = ct;
	h->cl_auth = authnone_create();
	sendsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)sendsz);
	recvsz = __rpc_get_t_size(si.si_af, si.si_proto, (int)recvsz);
	xdrrec_create(&(ct->ct_xdrs), sendsz, recvsz,
	    h->cl_private, read_vc, write_vc);
	return (h);

fooy:
	/*
	 * Something goofed, free stuff and barf
	 */
	if (ct)
		mem_free(ct, sizeof(struct ct_data));
	if (h)
		mem_free(h, sizeof(CLIENT));
	return (NULL);
}

static enum clnt_stat
clnt_vc_call(h, proc, xdr_args, args_ptr, xdr_results, results_ptr, timeout)
	CLIENT *h;
	rpcproc_t proc;
	xdrproc_t xdr_args;
	const char *args_ptr;
	xdrproc_t xdr_results;
	caddr_t results_ptr;
	struct timeval timeout;
{
	struct ct_data *ct;
	XDR *xdrs;
	struct rpc_msg reply_msg;
	u_int32_t x_id;
	u_int32_t *msg_x_id;
	bool_t shipnow;
	int refreshes = 2;
#ifdef _REENTRANT
	sigset_t mask, newmask;
#endif

	_DIAGASSERT(h != NULL);

	ct = (struct ct_data *) h->cl_private;

#ifdef _REENTRANT
	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
	mutex_unlock(&clnt_fd_lock);
#endif

	xdrs = &(ct->ct_xdrs);
	msg_x_id = &ct->ct_u.ct_mcalli;

	if (!ct->ct_waitset) {
		if (time_not_ok(&timeout) == FALSE)
			ct->ct_wait = timeout;
	}

	shipnow =
	    (xdr_results == NULL && timeout.tv_sec == 0
	    && timeout.tv_usec == 0) ? FALSE : TRUE;

call_again:
	xdrs->x_op = XDR_ENCODE;
	ct->ct_error.re_status = RPC_SUCCESS;
	x_id = ntohl(--(*msg_x_id));
	if ((! XDR_PUTBYTES(xdrs, ct->ct_u.ct_mcallc, ct->ct_mpos)) ||
	    (! XDR_PUTINT32(xdrs, (int32_t *)&proc)) ||
	    (! AUTH_MARSHALL(h->cl_auth, xdrs)) ||
	    (! (*xdr_args)(xdrs, __UNCONST(args_ptr)))) {
		if (ct->ct_error.re_status == RPC_SUCCESS)
			ct->ct_error.re_status = RPC_CANTENCODEARGS;
		(void)xdrrec_endofrecord(xdrs, TRUE);
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status);
	}
	if (! xdrrec_endofrecord(xdrs, shipnow)) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_CANTSEND);
	}
	if (! shipnow) {
		release_fd_lock(ct->ct_fd, mask);
		return (RPC_SUCCESS);
	}
	/*
	 * Hack to provide rpc-based message passing
	 */
	if (timeout.tv_sec == 0 && timeout.tv_usec == 0) {
		release_fd_lock(ct->ct_fd, mask);
		return (ct->ct_error.re_status = RPC_TIMEDOUT);
	}


	/*
	 * Keep receiving until we get a valid transaction id
	 */
	xdrs->x_op = XDR_DECODE;
	for (;;) {
		reply_msg.acpted_rply.ar_verf = _null_auth;
		reply_msg.acpted_rply.ar_results.where = NULL;
		reply_msg.acpted_rply.ar_results.proc = (xdrproc_t)xdr_void;
		if (! xdrrec_skiprecord(xdrs)) {
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		/* now decode and validate the response header */
		if (! xdr_replymsg(xdrs, &reply_msg)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				continue;
			release_fd_lock(ct->ct_fd, mask);
			return (ct->ct_error.re_status);
		}
		if (reply_msg.rm_xid == x_id)
			break;
	}

	/*
	 * process header
	 */
	_seterr_reply(&reply_msg, &(ct->ct_error));
	if (ct->ct_error.re_status == RPC_SUCCESS) {
		if (! AUTH_VALIDATE(h->cl_auth,
		    &reply_msg.acpted_rply.ar_verf)) {
			ct->ct_error.re_status = RPC_AUTHERROR;
			ct->ct_error.re_why = AUTH_INVALIDRESP;
		} else if (! (*xdr_results)(xdrs, results_ptr)) {
			if (ct->ct_error.re_status == RPC_SUCCESS)
				ct->ct_error.re_status = RPC_CANTDECODERES;
		}
		/* free verifier ... */
		if (reply_msg.acpted_rply.ar_verf.oa_base != NULL) {
			xdrs->x_op = XDR_FREE;
			(void)xdr_opaque_auth(xdrs,
			    &(reply_msg.acpted_rply.ar_verf));
		}
	}	/* end successful completion */
	else {
		/* maybe our credentials need to be refreshed ... */
		if (refreshes-- && AUTH_REFRESH(h->cl_auth))
			goto call_again;
	}	/* end of unsuccessful completion */
	release_fd_lock(ct->ct_fd, mask);
	return (ct->ct_error.re_status);
}

static void
clnt_vc_geterr(h, errp)
	CLIENT *h;
	struct rpc_err *errp;
{
	struct ct_data *ct;

	_DIAGASSERT(h != NULL);
	_DIAGASSERT(errp != NULL);

	ct = (struct ct_data *) h->cl_private;
	*errp = ct->ct_error;
}

static bool_t
clnt_vc_freeres(cl, xdr_res, res_ptr)
	CLIENT *cl;
	xdrproc_t xdr_res;
	caddr_t res_ptr;
{
	struct ct_data *ct;
	XDR *xdrs;
	bool_t dummy;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;
	xdrs = &(ct->ct_xdrs);

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
#endif

	xdrs->x_op = XDR_FREE;
	dummy = (*xdr_res)(xdrs, res_ptr);
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	cond_signal(&vc_cv[ct->ct_fd]);

	return dummy;
}

/*ARGSUSED*/
static void
clnt_vc_abort(cl)
	CLIENT *cl;
{
}
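
/*
 * Illustrative sketch (not part of this library's code): typical
 * clnt_control() requests against a handle from clnt_vc_create().  The
 * timeout value is hypothetical.
 *
 *	struct timeval tv = { 30, 0 };
 *	int fd;
 *
 *	clnt_control(clnt, CLSET_TIMEOUT, (char *)&tv);
 *	clnt_control(clnt, CLGET_FD, (char *)&fd);
 *	clnt_control(clnt, CLSET_FD_CLOSE, NULL);
 *
 * CLSET_TIMEOUT sets the receive timeout used by read_vc() and marks
 * ct_waitset, so per-call timeouts passed to clnt_call() no longer update
 * it; CLSET_FD_CLOSE asks clnt_vc_destroy() to close the descriptor.
 */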

static bool_t
clnt_vc_control(cl, request, info)
	CLIENT *cl;
	u_int request;
	char *info;
{
	struct ct_data *ct;
	void *infop = info;
#ifdef _REENTRANT
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *)cl->cl_private;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct->ct_fd])
		cond_wait(&vc_cv[ct->ct_fd], &clnt_fd_lock);
	vc_fd_locks[ct->ct_fd] = __rpc_lock_value;
#endif
	mutex_unlock(&clnt_fd_lock);

	switch (request) {
	case CLSET_FD_CLOSE:
		ct->ct_closeit = TRUE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	case CLSET_FD_NCLOSE:
		ct->ct_closeit = FALSE;
		release_fd_lock(ct->ct_fd, mask);
		return (TRUE);
	default:
		break;
	}

	/* for other requests which use info */
	if (info == NULL) {
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	switch (request) {
	case CLSET_TIMEOUT:
		if (time_not_ok((struct timeval *)(void *)info)) {
			release_fd_lock(ct->ct_fd, mask);
			return (FALSE);
		}
		ct->ct_wait = *(struct timeval *)infop;
		ct->ct_waitset = TRUE;
		break;
	case CLGET_TIMEOUT:
		*(struct timeval *)infop = ct->ct_wait;
		break;
	case CLGET_SERVER_ADDR:
		(void) memcpy(info, ct->ct_addr.buf, (size_t)ct->ct_addr.len);
		break;
	case CLGET_FD:
		*(int *)(void *)info = ct->ct_fd;
		break;
	case CLGET_SVC_ADDR:
		/* The caller should not free this memory area */
		*(struct netbuf *)(void *)info = ct->ct_addr;
		break;
	case CLSET_SVC_ADDR:		/* set to new address */
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	case CLGET_XID:
		/*
		 * use the knowledge that xid is the
		 * first element in the call structure
		 * This will get the xid of the PREVIOUS call
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli);
		break;
	case CLSET_XID:
		/* This will set the xid of the NEXT call */
		*(u_int32_t *)(void *)&ct->ct_u.ct_mcalli =
		    htonl(*((u_int32_t *)(void *)info) + 1);
		/* increment by 1 as clnt_vc_call() decrements once */
		break;
	case CLGET_VERS:
		/*
		 * This RELIES on the information that, in the call body,
		 * the version number field is the fifth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_VERS:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    4 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)(void *)info);
		break;

	case CLGET_PROG:
		/*
		 * This RELIES on the information that, in the call body,
		 * the program number field is the fourth field from the
		 * beginning of the RPC header.  MUST be changed if the
		 * call_struct is changed
		 */
		*(u_int32_t *)(void *)info =
		    ntohl(*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT));
		break;

	case CLSET_PROG:
		*(u_int32_t *)(void *)(ct->ct_u.ct_mcallc +
		    3 * BYTES_PER_XDR_UNIT) =
		    htonl(*(u_int32_t *)(void *)info);
		break;

	default:
		release_fd_lock(ct->ct_fd, mask);
		return (FALSE);
	}
	release_fd_lock(ct->ct_fd, mask);
	return (TRUE);
}


static void
clnt_vc_destroy(cl)
	CLIENT *cl;
{
	struct ct_data *ct;
#ifdef _REENTRANT
	int ct_fd;
	sigset_t mask;
#endif
	sigset_t newmask;

	_DIAGASSERT(cl != NULL);

	ct = (struct ct_data *) cl->cl_private;
	ct_fd = ct->ct_fd;

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&clnt_fd_lock);
#ifdef _REENTRANT
	while (vc_fd_locks[ct_fd])
		cond_wait(&vc_cv[ct_fd], &clnt_fd_lock);
#endif
	if (ct->ct_closeit && ct->ct_fd != -1) {
		(void)close(ct->ct_fd);
	}
	XDR_DESTROY(&(ct->ct_xdrs));
	if (ct->ct_addr.buf)
		free(ct->ct_addr.buf);
	mem_free(ct, sizeof(struct ct_data));
	mem_free(cl, sizeof(CLIENT));
	mutex_unlock(&clnt_fd_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);

	cond_signal(&vc_cv[ct_fd]);
}

/*
 * Interface between xdr serializer and tcp connection.
 * Behaves like the system calls, read & write, but keeps some error state
 * around for the rpc level.
 */
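/*
 * (read_vc and write_vc are installed by clnt_vc_create() as the xdrrec
 * read/write callbacks, with the ct_data pointer passed as the opaque
 * handle argument; see the xdrrec_create() call above.)
 */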
static int
read_vc(ctp, buf, len)
	caddr_t ctp;
	caddr_t buf;
	int len;
{
	struct ct_data *ct = (struct ct_data *)(void *)ctp;
	struct pollfd fd;
	struct timespec ts;

	if (len == 0)
		return (0);

	TIMEVAL_TO_TIMESPEC(&ct->ct_wait, &ts);
	fd.fd = ct->ct_fd;
	fd.events = POLLIN;
	for (;;) {
		switch (pollts(&fd, 1, &ts, NULL)) {
		case 0:
			ct->ct_error.re_status = RPC_TIMEDOUT;
			return (-1);

		case -1:
			if (errno == EINTR)
				continue;
			ct->ct_error.re_status = RPC_CANTRECV;
			ct->ct_error.re_errno = errno;
			return (-1);
		}
		break;
	}
	switch (len = read(ct->ct_fd, buf, (size_t)len)) {

	case 0:
		/* premature eof */
		ct->ct_error.re_errno = ECONNRESET;
		ct->ct_error.re_status = RPC_CANTRECV;
		len = -1;	/* it's really an error */
		break;

	case -1:
		ct->ct_error.re_errno = errno;
		ct->ct_error.re_status = RPC_CANTRECV;
		break;
	}
	return (len);
}

static int
write_vc(ctp, buf, len)
	caddr_t ctp;
	caddr_t buf;
	int len;
{
	struct ct_data *ct = (struct ct_data *)(void *)ctp;
	int i, cnt;

	for (cnt = len; cnt > 0; cnt -= i, buf += i) {
		if ((i = write(ct->ct_fd, buf, (size_t)cnt)) == -1) {
			ct->ct_error.re_errno = errno;
			ct->ct_error.re_status = RPC_CANTSEND;
			return (-1);
		}
	}
	return (len);
}

static struct clnt_ops *
clnt_vc_ops()
{
	static struct clnt_ops ops;
#ifdef _REENTRANT
	extern mutex_t ops_lock;
	sigset_t mask;
#endif
	sigset_t newmask;

	/* VARIABLES PROTECTED BY ops_lock: ops */

	sigfillset(&newmask);
	thr_sigsetmask(SIG_SETMASK, &newmask, &mask);
	mutex_lock(&ops_lock);
	if (ops.cl_call == NULL) {
		ops.cl_call = clnt_vc_call;
		ops.cl_abort = clnt_vc_abort;
		ops.cl_geterr = clnt_vc_geterr;
		ops.cl_freeres = clnt_vc_freeres;
		ops.cl_destroy = clnt_vc_destroy;
		ops.cl_control = clnt_vc_control;
	}
	mutex_unlock(&ops_lock);
	thr_sigsetmask(SIG_SETMASK, &(mask), NULL);
	return (&ops);
}

/*
 * Make sure that the time is not garbage.  -1 value is disallowed.
 * Note this is different from time_not_ok in clnt_dg.c
 */
static bool_t
time_not_ok(t)
	struct timeval *t;
{

	_DIAGASSERT(t != NULL);

	return (t->tv_sec <= -1 || t->tv_sec > 100000000 ||
	    t->tv_usec <= -1 || t->tv_usec > 1000000);
}