/* $NetBSD: gemini_ipmvar.h,v 1.2 2024/02/10 08:24:50 andvar Exp $ */

#ifndef _GEMINI_IPMVAR_H_
#define _GEMINI_IPMVAR_H_

/*
 * message queue
 *
 * - the queue gets located in memory shared between cores
 * - is mapped non-cached so SW coherency is not required.
 * - be sure ipm_queue_t starts on 32 bit (min) boundary to align descriptors
 * - note that index values fit in 8 bits since NIPMDESC < (1<<8);
 *   be sure to adjust the typedef if the size is increased
 * - current sizes, typedef, and padding make sizeof(ipm_queue_t) == 4096
 */
typedef uint32_t ipmqindex_t;
#define NIPMDESC	255
#define IPMQPADSZ	(4096 - ((sizeof(ipm_desc_t) * NIPMDESC) + (2 * sizeof(ipmqindex_t))))
typedef struct ipm_queue {
	ipm_desc_t ipm_desc[NIPMDESC];
	volatile ipmqindex_t ix_write;	/* writer inserts here, then increments */
	volatile ipmqindex_t ix_read;	/* reader extracts here, then increments */
	uint8_t pad[IPMQPADSZ];
} ipm_queue_t;

static inline ipmqindex_t
ipmqnext(ipmqindex_t ix)
{
	if (++ix >= NIPMDESC)
		ix = 0;
	return ix;
}

static inline bool
ipmqisempty(ipmqindex_t ixr, ipmqindex_t ixw)
{
	if (ixr == ixw)
		return TRUE;
	return FALSE;
}

static inline bool
ipmqisfull(ipmqindex_t ixr, ipmqindex_t ixw)
{
	if (ipmqnext(ixw) == ixr)
		return TRUE;
	return FALSE;
}

#endif	/* _GEMINI_IPMVAR_H_ */
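/*
 * Illustrative usage sketch (not part of the upstream header): a minimal
 * single-producer/single-consumer pair over ipm_queue_t, showing how
 * ix_write, ix_read, and the ipmq*() helpers above are intended to work
 * together.  The function names ipm_enqueue()/ipm_dequeue() are
 * hypothetical; the real producers/consumers live in the gemini IPM
 * driver code.  It assumes ipm_desc_t is defined elsewhere, that the
 * queue lives in the uncached shared mapping described in the comment
 * above (so no cache maintenance is shown), and that <sys/types.h> or
 * equivalent has already provided the fixed-width and bool types.
 */
#if 0
/* producer side: returns false when the queue is full */
static bool
ipm_enqueue(ipm_queue_t *ipmq, const ipm_desc_t *desc)
{
	ipmqindex_t ixw = ipmq->ix_write;

	if (ipmqisfull(ipmq->ix_read, ixw))
		return false;
	ipmq->ipm_desc[ixw] = *desc;	/* copy descriptor into the slot */
	ipmq->ix_write = ipmqnext(ixw);	/* publish the slot to the reader */
	return true;
}

/* consumer side: returns false when the queue is empty */
static bool
ipm_dequeue(ipm_queue_t *ipmq, ipm_desc_t *desc)
{
	ipmqindex_t ixr = ipmq->ix_read;

	if (ipmqisempty(ixr, ipmq->ix_write))
		return false;
	*desc = ipmq->ipm_desc[ixr];	/* copy descriptor out of the slot */
	ipmq->ix_read = ipmqnext(ixr);	/* release the slot to the writer */
	return true;
}
#endif	/* 0 */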