xf86Bus.c revision 05b261ec
1/*
2 * Copyright (c) 1997-2003 by The XFree86 Project, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Except as contained in this notice, the name of the copyright holder(s)
23 * and author(s) shall not be used in advertising or otherwise to promote
24 * the sale, use or other dealings in this Software without prior written
25 * authorization from the copyright holder(s) and author(s).
26 */
27
28#define REDUCER
29/*
30 * This file contains the interfaces to the bus-specific code
31 */
32
33#ifdef HAVE_XORG_CONFIG_H
34#include <xorg-config.h>
35#endif
36
37#include <ctype.h>
38#include <stdlib.h>
39#include <unistd.h>
40#include <X11/X.h>
41#include "os.h"
42#include "xf86.h"
43#include "xf86Priv.h"
44#include "xf86Resources.h"
45
46/* Bus-specific headers */
47
48#include "xf86Bus.h"
49
50#define XF86_OS_PRIVS
51#define NEED_OS_RAC_PROTOS
52#include "xf86_OSproc.h"
53
54#include "xf86RAC.h"
55
56/* Entity data */
57EntityPtr *xf86Entities = NULL;	/* Bus slots claimed by drivers */
58int xf86NumEntities = 0;
59static int xf86EntityPrivateCount = 0;
60BusAccPtr xf86BusAccInfo = NULL;
61
62xf86AccessRec AccessNULL = {NULL,NULL,NULL};
63
64xf86CurrentAccessRec xf86CurrentAccess = {NULL,NULL};
65
66BusRec primaryBus = { BUS_NONE, {{0}}};
67
68static Bool xf86ResAccessEnter = FALSE;
69
70#ifdef REDUCER
71/* Resources that temporarily conflict with estimated resources */
72static resPtr AccReducers = NULL;
73#endif
74
75/* resource lists */
76resPtr Acc = NULL;
77resPtr osRes = NULL;
78
79/* allocatable ranges */
80resPtr ResRange = NULL;
81
82/* predefined special resources */
83_X_EXPORT resRange resVgaExclusive[] = {_VGA_EXCLUSIVE, _END};
84_X_EXPORT resRange resVgaShared[] = {_VGA_SHARED, _END};
85_X_EXPORT resRange resVgaMemShared[] = {_VGA_SHARED_MEM,_END};
86_X_EXPORT resRange resVgaIoShared[] = {_VGA_SHARED_IO,_END};
87_X_EXPORT resRange resVgaUnusedExclusive[] = {_VGA_EXCLUSIVE_UNUSED, _END};
88_X_EXPORT resRange resVgaUnusedShared[] = {_VGA_SHARED_UNUSED, _END};
89_X_EXPORT resRange resVgaSparseExclusive[] = {_VGA_EXCLUSIVE_SPARSE, _END};
90_X_EXPORT resRange resVgaSparseShared[] = {_VGA_SHARED_SPARSE, _END};
91_X_EXPORT resRange res8514Exclusive[] = {_8514_EXCLUSIVE, _END};
92_X_EXPORT resRange res8514Shared[] = {_8514_SHARED, _END};
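/*
 * These are ResEnd-terminated resRange arrays (resList), so they can be
 * handed to the list-taking resource functions in this file, for example
 * xf86AddRangesToList() or xf86RegisterResources().  The actual ranges
 * behind the _VGA_* and _8514_* macros are assumed to come from
 * xf86Resources.h.
 */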
93
94/* Flag: do we need RAC ? */
95static Bool needRAC = FALSE;
96static Bool doFramebufferMode = FALSE;
97
98/* state change notification callback list */
99static StateChangeNotificationPtr StateChangeNotificationList;
100static void notifyStateChange(xf86NotifyState state);
101
102#undef MIN
103#define MIN(x,y) (((x) < (y)) ? (x) : (y))
104
105
106/*
107 * Call the bus probes relevant to the architecture.
108 *
109 * The only ones available so far are for PCI and SBUS.
110 */
111
112void
113xf86BusProbe(void)
114{
115    xf86PciProbe();
116#if (defined(__sparc__) || defined(__sparc)) && !defined(__OpenBSD__)
117    xf86SbusProbe();
118#endif
119}
120
121/*
122 * Determine what bus type the busID string represents.  The start of the
123 * bus-dependent part of the string is returned as retID.
124 */
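/*
 * A few illustrative busID strings (made-up example values):
 *   "PCI:1:0:0" -> BUS_PCI,  *retID = "1:0:0"
 *   "1:0:0"     -> BUS_PCI   (a leading digit defaults to PCI)
 *   "SBUS:fb0"  -> BUS_SBUS, *retID = "fb0"
 */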
125
126BusType
127StringToBusType(const char* busID, const char **retID)
128{
129    char *p, *s;
130    BusType ret = BUS_NONE;
131
132    /* If there is no type field, default to PCI */
133    if (isdigit(busID[0])) {
134	if (retID)
135	    *retID = busID;
136	return BUS_PCI;
137    }
138
139    s = xstrdup(busID);
140    p = strtok(s, ":");
141    if (p == NULL || *p == 0) {
142	xfree(s);
143	return BUS_NONE;
144    }
145    if (!xf86NameCmp(p, "pci") || !xf86NameCmp(p, "agp"))
146	ret = BUS_PCI;
147    if (!xf86NameCmp(p, "isa"))
148	ret = BUS_ISA;
149    if (!xf86NameCmp(p, "sbus"))
150	ret = BUS_SBUS;
151    if (ret != BUS_NONE)
152	if (retID)
153	    *retID = busID + strlen(p) + 1;
154    xfree(s);
155    return ret;
156}
157
158/*
159 * Entity related code.
160 */
161
162void
163xf86EntityInit(void)
164{
165    int i;
166    resPtr *pprev_next;
167    resPtr res;
168    xf86AccessPtr pacc;
169
170    for (i = 0; i < xf86NumEntities; i++)
171	if (xf86Entities[i]->entityInit) {
172	    if (xf86Entities[i]->access->busAcc)
173		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
174		    (xf86Entities[i]->access->busAcc);
175	    pacc = xf86Entities[i]->access->fallback;
176	    if (pacc->AccessEnable)
177		pacc->AccessEnable(pacc->arg);
178	    xf86Entities[i]->entityInit(i,xf86Entities[i]->private);
179	    if (pacc->AccessDisable)
180		pacc->AccessDisable(pacc->arg);
181	    /* remove init resources after init is processed */
182	    pprev_next = &Acc;
183	    res = Acc;
184	    while (res) {
185		if (res->res_type & ResInit && (res->entityIndex == i)) {
186		    (*pprev_next) = res->next;
187		    xfree(res);
188		} else
189		    pprev_next = &(res->next);
190		res = (*pprev_next);
191	    }
192	}
193}
194
195int
196xf86AllocateEntity(void)
197{
198    xf86NumEntities++;
199    xf86Entities = xnfrealloc(xf86Entities,
200			      sizeof(EntityPtr) * xf86NumEntities);
201    xf86Entities[xf86NumEntities - 1] = xnfcalloc(1,sizeof(EntityRec));
202    xf86Entities[xf86NumEntities - 1]->entityPrivates =
203               xnfcalloc(sizeof(DevUnion) * xf86EntityPrivateCount, 1);
204    return (xf86NumEntities - 1);
205}
206
207static void
208EntityEnter(void)
209{
210    int i;
211    xf86AccessPtr pacc;
212
213    for (i = 0; i < xf86NumEntities; i++)
214	if (xf86Entities[i]->entityEnter) {
215	    if (xf86Entities[i]->access->busAcc)
216		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
217		    (xf86Entities[i]->access->busAcc);
218	    pacc = xf86Entities[i]->access->fallback;
219	    if (pacc->AccessEnable)
220		pacc->AccessEnable(pacc->arg);
221	    xf86Entities[i]->entityEnter(i,xf86Entities[i]->private);
222	    if (pacc->AccessDisable)
223		pacc->AccessDisable(pacc->arg);
224	}
225}
226
227static void
228EntityLeave(void)
229{
230    int i;
231    xf86AccessPtr pacc;
232
233    for (i = 0; i < xf86NumEntities; i++)
234	if (xf86Entities[i]->entityLeave) {
235	    if (xf86Entities[i]->access->busAcc)
236		((BusAccPtr)xf86Entities[i]->access->busAcc)->set_f
237		    (xf86Entities[i]->access->busAcc);
238	    pacc = xf86Entities[i]->access->fallback;
239	    if (pacc->AccessEnable)
240		pacc->AccessEnable(pacc->arg);
241	    xf86Entities[i]->entityLeave(i,xf86Entities[i]->private);
242	    if (pacc->AccessDisable)
243		pacc->AccessDisable(pacc->arg);
244	}
245}
246
247_X_EXPORT Bool
248xf86IsEntityPrimary(int entityIndex)
249{
250    EntityPtr pEnt = xf86Entities[entityIndex];
251
252    if (primaryBus.type != pEnt->busType) return FALSE;
253
254    switch (pEnt->busType) {
255    case BUS_PCI:
256	return (pEnt->pciBusId.bus == primaryBus.id.pci.bus &&
257		pEnt->pciBusId.device == primaryBus.id.pci.device &&
258		pEnt->pciBusId.func == primaryBus.id.pci.func);
259    case BUS_ISA:
260	return TRUE;
261    case BUS_SBUS:
262	return (pEnt->sbusBusId.fbNum == primaryBus.id.sbus.fbNum);
263    default:
264	return FALSE;
265    }
266}
267
268_X_EXPORT Bool
269xf86SetEntityFuncs(int entityIndex, EntityProc init, EntityProc enter,
270		   EntityProc leave, pointer private)
271{
272    if (entityIndex >= xf86NumEntities)
273	return FALSE;
274    xf86Entities[entityIndex]->entityInit = init;
275    xf86Entities[entityIndex]->entityEnter = enter;
276    xf86Entities[entityIndex]->entityLeave = leave;
277    xf86Entities[entityIndex]->private = private;
278    return TRUE;
279}
280
281Bool
282xf86DriverHasEntities(DriverPtr drvp)
283{
284    int i;
285    for (i = 0; i < xf86NumEntities; i++) {
286	if (xf86Entities[i]->driver == drvp)
287	    return TRUE;
288    }
289    return FALSE;
290}
291
292_X_EXPORT void
293xf86AddEntityToScreen(ScrnInfoPtr pScrn, int entityIndex)
294{
295    if (entityIndex == -1)
296	return;
297    if (xf86Entities[entityIndex]->inUse &&
298	!(xf86Entities[entityIndex]->entityProp & IS_SHARED_ACCEL))
299	FatalError("Requested Entity already in use!\n");
300
301    pScrn->numEntities++;
302    pScrn->entityList = xnfrealloc(pScrn->entityList,
303				    pScrn->numEntities * sizeof(int));
304    pScrn->entityList[pScrn->numEntities - 1] = entityIndex;
305    xf86Entities[entityIndex]->access->next = pScrn->access;
306    pScrn->access = xf86Entities[entityIndex]->access;
307    xf86Entities[entityIndex]->inUse = TRUE;
308    pScrn->entityInstanceList = xnfrealloc(pScrn->entityInstanceList,
309				    pScrn->numEntities * sizeof(int));
310    pScrn->entityInstanceList[pScrn->numEntities - 1] = 0;
311    pScrn->domainIOBase = xf86Entities[entityIndex]->domainIO;
312}
313
314_X_EXPORT void
315xf86SetEntityInstanceForScreen(ScrnInfoPtr pScrn, int entityIndex, int instance)
316{
317    int i;
318
319    if (entityIndex == -1 || entityIndex >= xf86NumEntities)
320	return;
321
322    for (i = 0; i < pScrn->numEntities; i++) {
323	if (pScrn->entityList[i] == entityIndex) {
324	    pScrn->entityInstanceList[i] = instance;
325	    break;
326	}
327    }
328}
329
330/*
331 * XXX  This needs to be updated for the case where a single entity may have
332 * instances associated with more than one screen.
333 */
334_X_EXPORT ScrnInfoPtr
335xf86FindScreenForEntity(int entityIndex)
336{
337    int i,j;
338
339    if (entityIndex == -1) return NULL;
340
341    if (xf86Screens) {
342	for (i = 0; i < xf86NumScreens; i++) {
343	    for (j = 0; j < xf86Screens[i]->numEntities; j++) {
344		if ( xf86Screens[i]->entityList[j] == entityIndex )
345		    return (xf86Screens[i]);
346	    }
347	}
348    }
349    return NULL;
350}
351
352_X_EXPORT void
353xf86RemoveEntityFromScreen(ScrnInfoPtr pScrn, int entityIndex)
354{
355    int i;
356    EntityAccessPtr *ptr = (EntityAccessPtr *)&pScrn->access;
357    EntityAccessPtr peacc;
358
359    for (i = 0; i < pScrn->numEntities; i++) {
360	if (pScrn->entityList[i] == entityIndex) {
361	    peacc = xf86Entities[pScrn->entityList[i]]->access;
362	    (*ptr) = peacc->next;
363	    /* disable entity: call disable func */
364	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
365		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
366	    /* also disable fallback - just in case */
367	    if (peacc->fallback && peacc->fallback->AccessDisable)
368		peacc->fallback->AccessDisable(peacc->fallback->arg);
369	    for (i++; i < pScrn->numEntities; i++)
370		pScrn->entityList[i-1] = pScrn->entityList[i];
371	    pScrn->numEntities--;
372	    xf86Entities[entityIndex]->inUse = FALSE;
373	    break;
374	}
375	ptr = &(xf86Entities[pScrn->entityList[i]]->access->next);
376    }
377}
378
379/*
380 * xf86ClearEntityListForScreen() - called when a screen is deleted
381 * to mark its entities as unused. Called by xf86DeleteScreen().
382 */
383void
384xf86ClearEntityListForScreen(int scrnIndex)
385{
386    ScrnInfoPtr pScrn = xf86Screens[scrnIndex];
387    EntityAccessPtr peacc;
388    int i, entityIndex;
389
390    if (pScrn->entityList == NULL || pScrn->numEntities == 0) return;
391
392    for (i = 0; i < pScrn->numEntities; i++) {
393	entityIndex = pScrn->entityList[i];
394	xf86Entities[entityIndex]->inUse = FALSE;
395	/* disable resource: call the disable function */
396	peacc = xf86Entities[entityIndex]->access;
397	if (peacc->pAccess && peacc->pAccess->AccessDisable)
398	    peacc->pAccess->AccessDisable(peacc->pAccess->arg);
399	/* and the fallback function */
400	if (peacc->fallback && peacc->fallback->AccessDisable)
401	    peacc->fallback->AccessDisable(peacc->fallback->arg);
402	/* shared resources are only needed when entity is active: remove */
403	xf86DeallocateResourcesForEntity(entityIndex, ResShared);
404    }
405    xfree(pScrn->entityList);
406    xfree(pScrn->entityInstanceList);
407    if (pScrn->CurrentAccess->pIoAccess == (EntityAccessPtr)pScrn->access)
408	pScrn->CurrentAccess->pIoAccess = NULL;
409    if (pScrn->CurrentAccess->pMemAccess == (EntityAccessPtr)pScrn->access)
410	pScrn->CurrentAccess->pMemAccess = NULL;
411    pScrn->entityList = NULL;
412    pScrn->entityInstanceList = NULL;
413}
414
415_X_EXPORT void
416xf86DeallocateResourcesForEntity(int entityIndex, unsigned long type)
417{
418    resPtr *pprev_next = &Acc;
419    resPtr res = Acc;
420
421    while (res) {
422	if (res->entityIndex == entityIndex &&
423	    (type & ResAccMask & res->res_type))
424	{
425	    (*pprev_next) = res->next;
426	    xfree(res);
427	} else
428	    pprev_next = &(res->next);
429	res = (*pprev_next);
430    }
431}
432
433/*
434 * Add an extra device section (GDevPtr) to an entity.
435 */
436
437void
438xf86AddDevToEntity(int entityIndex, GDevPtr dev)
439{
440    EntityPtr pEnt;
441
442    if (entityIndex >= xf86NumEntities)
443	return;
444
445    pEnt = xf86Entities[entityIndex];
446    pEnt->numInstances++;
447    pEnt->devices = xnfrealloc(pEnt->devices,
448				pEnt->numInstances * sizeof(GDevPtr));
449    pEnt->devices[pEnt->numInstances - 1] = dev;
450    dev->claimed = TRUE;
451}
452
453/*
454 * xf86GetEntityInfo() -- This function hands information from the
455 * EntityRec struct to the drivers. The EntityRec structure itself
456 * remains invisible to the driver.
457 */
458_X_EXPORT EntityInfoPtr
459xf86GetEntityInfo(int entityIndex)
460{
461    EntityInfoPtr pEnt;
462    int i;
463
464    if (entityIndex >= xf86NumEntities)
465	return NULL;
466
467    pEnt = xnfcalloc(1,sizeof(EntityInfoRec));
468    pEnt->index = entityIndex;
469    pEnt->location = xf86Entities[entityIndex]->bus;
470    pEnt->active = xf86Entities[entityIndex]->active;
471    pEnt->chipset = xf86Entities[entityIndex]->chipset;
472    pEnt->resources = xf86Entities[entityIndex]->resources;
473    pEnt->driver = xf86Entities[entityIndex]->driver;
474    if ( (xf86Entities[entityIndex]->devices) &&
475         (xf86Entities[entityIndex]->devices[0]) ) {
476	for (i = 0; i < xf86Entities[entityIndex]->numInstances; i++)
477	    if (xf86Entities[entityIndex]->devices[i]->screen == 0)
478	        break;
479	pEnt->device = xf86Entities[entityIndex]->devices[i];
480    } else
481	pEnt->device = NULL;
482
483    return pEnt;
484}
485
486_X_EXPORT int
487xf86GetNumEntityInstances(int entityIndex)
488{
489    if (entityIndex >= xf86NumEntities)
490	return -1;
491
492    return xf86Entities[entityIndex]->numInstances;
493}
494
495_X_EXPORT GDevPtr
496xf86GetDevFromEntity(int entityIndex, int instance)
497{
498    int i;
499
500    if (entityIndex >= xf86NumEntities ||
501	instance >= xf86Entities[entityIndex]->numInstances)
502	return NULL;
503
504    /* We might not have used xf86AddDevToEntity() */
505    if ( (!xf86Entities[entityIndex]->devices) ||
506         (!xf86Entities[entityIndex]->devices[0]) )
507	return NULL;
508
509    for (i = 0; i < xf86Entities[entityIndex]->numInstances; i++)
510	if (xf86Entities[entityIndex]->devices[i]->screen == instance)
511	    break;
512    return xf86Entities[entityIndex]->devices[i];
513}
514
515/*
516 * general generic disable function.
517 */
518static void
519disableAccess(void)
520{
521    int i;
522    xf86AccessPtr pacc;
523    EntityAccessPtr peacc;
524
525    /* call disable funcs and reset current access pointer   */
526    /* the entity-specific access funcs are in an enabled    */
527    /* state - the driver must restore their state explicitly */
528    for (i = 0; i < xf86NumScreens; i++) {
529	peacc = xf86Screens[i]->CurrentAccess->pIoAccess;
530	while (peacc) {
531	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
532		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
533	    peacc = peacc->next;
534	}
535	xf86Screens[i]->CurrentAccess->pIoAccess = NULL;
536	peacc = xf86Screens[i]->CurrentAccess->pMemAccess;
537	while (peacc) {
538	    if (peacc->pAccess && peacc->pAccess->AccessDisable)
539		peacc->pAccess->AccessDisable(peacc->pAccess->arg);
540	    peacc = peacc->next;
541	}
542	xf86Screens[i]->CurrentAccess->pMemAccess = NULL;
543    }
544    /* then call the generic entity disable funcs */
545    for (i = 0; i < xf86NumEntities; i++) {
546	pacc = xf86Entities[i]->access->fallback;
547	if (pacc->AccessDisable)
548	    pacc->AccessDisable(pacc->arg);
549    }
550}
551
552static void
553clearAccess(void)
554{
555    int i;
556
557    /* call disable funcs and reset current access pointer   */
558    /* the entity-specific access funcs are in an enabled    */
559    /* state - the driver must restore their state explicitly */
560    for (i = 0; i < xf86NumScreens; i++) {
561	xf86Screens[i]->CurrentAccess->pIoAccess = NULL;
562	xf86Screens[i]->CurrentAccess->pMemAccess = NULL;
563    }
564
565}
566
567/*
568 * Generic interface to bus specific code - add other buses here
569 */
570
571/*
572 * xf86AccessInit() - set up everything needed for access control.
573 * Called only once, on the first server generation.
574 */
575void
576xf86AccessInit(void)
577{
578    initPciState();
579    initPciBusState();
580    DisablePciBusAccess();
581    DisablePciAccess();
582
583    xf86ResAccessEnter = TRUE;
584}
585
586/*
587 * xf86AccessEnter() -- gets called to save the text mode VGA IO
588 * resources when reentering the server after a VT switch.
589 */
590void
591xf86AccessEnter(void)
592{
593    if (xf86ResAccessEnter)
594	return;
595
596    /*
597     * on enter we simply disable routing of special resources
598     * to any bus and let the RAC code "open" the right bridges.
599     */
600    PciBusStateEnter();
601    DisablePciBusAccess();
602    PciStateEnter();
603    disableAccess();
604    EntityEnter();
605    notifyStateChange(NOTIFY_ENTER);
606    xf86EnterServerState(SETUP);
607    xf86ResAccessEnter = TRUE;
608}
609
610/*
611 * xf86AccessLeave() -- prepares access for and calls the
612 * entityLeave() functions.
613 * xf86AccessLeaveState() --- gets called to restore the
614 * access to the VGA IO resources when switching VT or on
615 * server exit.
616 * This was split to call xf86AccessLeaveState() from
617 * ddxGiveUp().
618 */
619void
620xf86AccessLeave(void)
621{
622    if (!xf86ResAccessEnter)
623	return;
624    notifyStateChange(NOTIFY_LEAVE);
625    disableAccess();
626    DisablePciBusAccess();
627    EntityLeave();
628}
629
630void
631xf86AccessLeaveState(void)
632{
633    if (!xf86ResAccessEnter)
634	return;
635    xf86ResAccessEnter = FALSE;
636    PciStateLeave();
637    PciBusStateLeave();
638}
639
640/*
641 * xf86AccessRestoreState() - Restore the access registers to the
642 * state before X was started. This is handy for framebuffers.
643 */
644static void
645xf86AccessRestoreState(void)
646{
647    if (!xf86ResAccessEnter)
648	return;
649    PciStateLeave();
650    PciBusStateLeave();
651}
652
653/*
654 * xf86EnableAccess() -- enable access to controlled resources.
655 * To reduce latency when switching access the ScrnInfoRec has
656 * a linked list of the EntityAccPtr of all screen entities.
657 */
658/*
659 * switching access needs to be done in the following order:
660 *
661 * 1. disable old entity
662 * 2. reroute bus
663 * 3. enable new entity
664 * Otherwise resources needed for access control might be shadowed
665 * by other resources!
666 */
667
668_X_EXPORT void
669xf86EnableAccess(ScrnInfoPtr pScrn)
670{
671    register EntityAccessPtr peAcc = (EntityAccessPtr) pScrn->access;
672    register EntityAccessPtr pceAcc;
673    register xf86AccessPtr pAcc;
674    EntityAccessPtr tmp;
675
676#ifdef DEBUG
677    ErrorF("Enable access %i\n",pScrn->scrnIndex);
678#endif
679
680    /* Entity is not under access control or currently enabled */
681    if (!pScrn->access) {
682	if (pScrn->busAccess) {
683	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
684	}
685	return;
686    }
687
688    switch (pScrn->resourceType) {
689    case IO:
690	pceAcc = pScrn->CurrentAccess->pIoAccess;
691	if (peAcc == pceAcc) {
692	    return;
693	}
694	if (pScrn->CurrentAccess->pMemAccess == pceAcc)
695	    pScrn->CurrentAccess->pMemAccess = NULL;
696	while (pceAcc) {
697	    pAcc = pceAcc->pAccess;
698	    if ( pAcc && pAcc->AccessDisable)
699		(*pAcc->AccessDisable)(pAcc->arg);
700	    pceAcc = pceAcc->next;
701	}
702	if (pScrn->busAccess)
703	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
704	while (peAcc) {
705	    pAcc = peAcc->pAccess;
706	    if (pAcc && pAcc->AccessEnable)
707		(*pAcc->AccessEnable)(pAcc->arg);
708	    peAcc = peAcc->next;
709	}
710	pScrn->CurrentAccess->pIoAccess = (EntityAccessPtr) pScrn->access;
711	return;
712
713    case MEM_IO:
714	pceAcc = pScrn->CurrentAccess->pIoAccess;
715	if (peAcc != pceAcc) { /* current Io != pAccess */
716	    tmp = pceAcc;
717	    while (pceAcc) {
718		pAcc = pceAcc->pAccess;
719		if (pAcc && pAcc->AccessDisable)
720		    (*pAcc->AccessDisable)(pAcc->arg);
721		pceAcc = pceAcc->next;
722	    }
723	    pceAcc = pScrn->CurrentAccess->pMemAccess;
724	    if (peAcc != pceAcc /* current Mem != pAccess */
725		&& tmp !=pceAcc) {
726		while (pceAcc) {
727		    pAcc = pceAcc->pAccess;
728		    if (pAcc && pAcc->AccessDisable)
729			(*pAcc->AccessDisable)(pAcc->arg);
730		    pceAcc = pceAcc->next;
731		}
732	    }
733	} else {    /* current Io == pAccess */
734	    pceAcc = pScrn->CurrentAccess->pMemAccess;
735	    if (pceAcc == peAcc) { /* current Mem == pAccess */
736		return;
737	    }
738	    while (pceAcc) {  /* current Mem != pAccess */
739		pAcc = pceAcc->pAccess;
740		if (pAcc && pAcc->AccessDisable)
741		    (*pAcc->AccessDisable)(pAcc->arg);
742		pceAcc = pceAcc->next;
743	    }
744	}
745	if (pScrn->busAccess)
746	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
747	while (peAcc) {
748	    pAcc = peAcc->pAccess;
749	    if (pAcc && pAcc->AccessEnable)
750		(*pAcc->AccessEnable)(pAcc->arg);
751	    peAcc = peAcc->next;
752	}
753	pScrn->CurrentAccess->pMemAccess =
754	    pScrn->CurrentAccess->pIoAccess = (EntityAccessPtr) pScrn->access;
755	return;
756
757    case MEM:
758	pceAcc = pScrn->CurrentAccess->pMemAccess;
759	if (peAcc == pceAcc) {
760	    return;
761	}
762	if (pScrn->CurrentAccess->pIoAccess == pceAcc)
763	    pScrn->CurrentAccess->pIoAccess = NULL;
764	while (pceAcc) {
765	    pAcc = pceAcc->pAccess;
766	    if ( pAcc && pAcc->AccessDisable)
767		(*pAcc->AccessDisable)(pAcc->arg);
768	    pceAcc = pceAcc->next;
769	}
770	if (pScrn->busAccess)
771	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
772	while (peAcc) {
773	    pAcc = peAcc->pAccess;
774	    if (pAcc && pAcc->AccessEnable)
775		(*pAcc->AccessEnable)(pAcc->arg);
776	    peAcc = peAcc->next;
777	}
778	pScrn->CurrentAccess->pMemAccess = (EntityAccessPtr) pScrn->access;
779	return;
780
781    case NONE:
782	if (pScrn->busAccess) {
783	    ((BusAccPtr)pScrn->busAccess)->set_f(pScrn->busAccess);
784	}
785	return;
786    }
787}
788
789_X_EXPORT void
790xf86SetCurrentAccess(Bool Enable, ScrnInfoPtr pScrn)
791{
792    EntityAccessPtr pceAcc2 = NULL;
793    register EntityAccessPtr pceAcc = NULL;
794    register xf86AccessPtr pAcc;
795
796
797    switch(pScrn->resourceType) {
798    case IO:
799	pceAcc = pScrn->CurrentAccess->pIoAccess;
800	break;
801    case MEM:
802	pceAcc = pScrn->CurrentAccess->pMemAccess;
803	break;
804    case MEM_IO:
805	pceAcc = pScrn->CurrentAccess->pMemAccess;
806	pceAcc2 = pScrn->CurrentAccess->pIoAccess;
807	break;
808    default:
809	break;
810    }
811
812    while (pceAcc) {
813	pAcc = pceAcc->pAccess;
814	if ( pAcc) {
815	    if (!Enable) {
816		if (pAcc->AccessDisable)
817		    (*pAcc->AccessDisable)(pAcc->arg);
818	    } else {
819		if (pAcc->AccessEnable)
820		    (*pAcc->AccessEnable)(pAcc->arg);
821	    }
822	}
823	pceAcc = pceAcc->next;
824	if (!pceAcc) {
825	    pceAcc = pceAcc2;
826	    pceAcc2 = NULL;
827	}
828    }
829}
830
831_X_EXPORT void
832xf86SetAccessFuncs(EntityInfoPtr pEnt, xf86SetAccessFuncPtr funcs,
833		   xf86SetAccessFuncPtr oldFuncs)
834{
835    AccessFuncPtr rac;
836
837    if (!xf86Entities[pEnt->index]->rac)
838	xf86Entities[pEnt->index]->rac = xnfcalloc(1,sizeof(AccessFuncRec));
839
840    rac = xf86Entities[pEnt->index]->rac;
841
842    if (funcs->mem == funcs->io_mem && funcs->mem && funcs->io)
843	xf86Entities[pEnt->index]->entityProp |= NO_SEPARATE_MEM_FROM_IO;
844    if (funcs->io == funcs->io_mem && funcs->mem && funcs->io)
845	xf86Entities[pEnt->index]->entityProp |= NO_SEPARATE_IO_FROM_MEM;
846
847    rac->mem_new = funcs->mem;
848    rac->io_new = funcs->io;
849    rac->io_mem_new = funcs->io_mem;
850
851    rac->old = oldFuncs;
852}
853
854/*
855 * Conflict checking
856 */
857
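/*
 * getMask() -- return a mask covering every bit up to and including the
 * highest bit set in val; e.g. (assuming a 32-bit memType)
 * getMask(0x1000) == 0x1fff.
 */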
858static memType
859getMask(memType val)
860{
861    memType mask = 0;
862    memType tmp = 0;
863
864    mask=~mask;
865    tmp = ~((~tmp) >> 1);
866
867    while (!(val & tmp)) {
868	mask = mask >> 1;
869	val = val << 1;
870    }
871    return mask;
872}
873
874/*
875 * checkConflictBlock() -- check for conflicts of a block resource range.
876 * If conflict is found return end of conflicting range. Else return 0.
877 */
878static memType
879checkConflictBlock(resRange *range, resPtr pRes)
880{
881    memType val,tmp,prev;
882    int i;
883
884    switch (pRes->res_type & ResExtMask) {
885    case ResBlock:
886	if (range->rBegin < pRes->block_end &&
887	    range->rEnd > pRes->block_begin) {
888#ifdef DEBUG
889	    ErrorF("b-b conflict w: %lx %lx\n",
890		   pRes->block_begin,pRes->block_end);
891#endif
892	    return pRes->block_end < range->rEnd ?
893		pRes->block_end : range->rEnd;
894	}
895	return 0;
896    case ResSparse:
897	if (pRes->sparse_base > range->rEnd) return 0;
898
899	val = (~pRes->sparse_mask | pRes->sparse_base) & getMask(range->rEnd);
900#ifdef DEBUG
901	ErrorF("base = 0x%lx, mask = 0x%lx, begin = 0x%lx, end = 0x%lx ,"
902	       "val = 0x%lx\n",
903		pRes->sparse_base, pRes->sparse_mask, range->rBegin,
904		range->rEnd, val);
905#endif
906	i = sizeof(memType) * 8;
907	tmp = prev = pRes->sparse_base;
908
909	while (i) {
910	    tmp |= 1<< (--i) & val;
911	    if (tmp > range->rEnd)
912		tmp = prev;
913	    else
914		prev = tmp;
915	}
916	if (tmp >= range->rBegin) {
917#ifdef DEBUG
918	    ErrorF("conflict found at: 0x%lx\n",tmp);
919	    ErrorF("b-d conflict w: %lx %lx\n",
920		   pRes->sparse_base,pRes->sparse_mask);
921#endif
922	    return tmp;
923	}
924	else
925	    return 0;
926    }
927    return 0;
928}
929
930/*
931 * checkConflictSparse() -- check for conflicts of a sparse resource range.
932 * If conflict is found return base of conflicting region. Else return 0.
933 */
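/*
 * (A sparse range <base,mask> is taken here to decode every address A
 * with (A & mask) == base, so two sparse ranges conflict exactly when
 * their bases agree on the bits set in both masks -- which is what the
 * ResSparse case below tests.)
 */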
934#define mt_max ~(memType)0
935#define length sizeof(memType) * 8
936static memType
937checkConflictSparse(resRange *range, resPtr pRes)
938{
939    memType val, tmp, prev;
940    int i;
941
942    switch (pRes->res_type & ResExtMask) {
943    case ResSparse:
944	tmp = pRes->sparse_mask & range->rMask;
945	if ((tmp & pRes->sparse_base) == (tmp & range->rBase)) {
946#ifdef DEBUG
947	    ErrorF("s-b conflict w: %lx %lx\n",
948		   pRes->sparse_base,pRes->sparse_mask);
949#endif
950	    return pRes->sparse_mask;
951	}
952	return 0;
953
954    case ResBlock:
955	if (pRes->block_end < range->rBase) return 0;
956
957	val = (~range->rMask | range->rBase) & getMask(pRes->block_end);
958	i = length;
959	tmp = prev = range->rBase;
960
961	while (i) {
962#ifdef DEBUG
963	    ErrorF("tmp = 0x%lx\n",tmp);
964#endif
965	    tmp |= 1<< (--i) & val;
966	    if (tmp > pRes->block_end)
967		tmp = prev;
968	    else
969		prev = tmp;
970	}
971	if (tmp < pRes->block_begin)
972	    return 0;
973	else {
974	    /*
975	     * now we subdivide the block region in sparse regions
976	     * with base values = 2^n and find the smallest mask.
977	     * This might be done in a simpler way....
978	     */
979	    memType mask, m_mask = 0, base = pRes->block_begin;
980	    int i;
981	    while (base < pRes->block_end) {
982		for (i = 1; i < length; i++)
983		    if ( base != (base & (mt_max << i))) break;
984		mask = mt_max >> (length - i);
985		do mask >>= 1;
986		while ((mask + base + 1) > pRes->block_end);
987		/* m_mask and mask are _inverted_ sparse masks */
988		m_mask = mask > m_mask ? mask : m_mask;
989		base = base + mask + 1;
990	    }
991#ifdef DEBUG
992	    ErrorF("conflict found at: 0x%lx\n",tmp);
993	    ErrorF("b-b conflict w: %lx %lx\n",
994		   pRes->block_begin,pRes->block_end);
995#endif
996	    return ~m_mask;
997	}
998    }
999    return 0;
1000}
1001#undef mt_max
1002#undef length
1003
1004/*
1005 * needCheck() -- this function decides whether to check for conflicts
1006 * depending on the types of the resource ranges and their locations
1007 */
1008static Bool
1009needCheck(resPtr pRes, unsigned long type, int entityIndex, xf86State state)
1010{
1011    /* the same entity shouldn't conflict with itself */
1012    ScrnInfoPtr pScrn;
1013    int i;
1014    BusType loc = BUS_NONE;
1015    BusType r_loc = BUS_NONE;
1016
1017    /* Ignore overlapped ranges that have been nullified */
1018    if ((pRes->res_type & ResOverlap) && (pRes->block_begin > pRes->block_end))
1019	return FALSE;
1020
1021    if ((pRes->res_type & ResTypeMask) != (type & ResTypeMask))
1022        return FALSE;
1023
1024    /*
1025     * Resources set by BIOS (ResBios) are allowed to conflict
1026     * with resources marked (ResBios).
1027     */
1028    if (pRes->res_type & type & ResBios)
1029	return FALSE;
1030
1031    /* If requested, skip over estimated resources */
1032    if (pRes->res_type & type & ResEstimated)
1033 	return FALSE;
1034
1035    if (type & pRes->res_type & ResUnused)
1036 	return FALSE;
1037
1038    if (state == OPERATING) {
1039	if (type & ResDisableOpr || pRes->res_type & ResDisableOpr)
1040	    return FALSE;
1041	if (type & pRes->res_type & ResUnusedOpr) return FALSE;
1042	/*
1043	 * Maybe ResUnused should also set the ResUnusedOpr bit;
1044	 * that way we could avoid this confusion.
1045	 */
1046	if ((type & ResUnusedOpr && pRes->res_type & ResUnused) ||
1047	    (type & ResUnused && pRes->res_type & ResUnusedOpr))
1048	    return FALSE;
1049    }
1050
1051    if (entityIndex > -1)
1052	loc = xf86Entities[entityIndex]->busType;
1053    if (pRes->entityIndex > -1)
1054	r_loc = xf86Entities[pRes->entityIndex]->busType;
1055
1056    switch (type & ResAccMask) {
1057    case ResExclusive:
1058	switch (pRes->res_type & ResAccMask) {
1059	case ResExclusive:
1060	    break;
1061	case ResShared:
1062	    /* ISA buses are only locally exclusive on a PCI system */
1063	    if (loc == BUS_ISA && r_loc == BUS_PCI)
1064		return FALSE;
1065	    break;
1066	}
1067	break;
1068    case ResShared:
1069	switch (pRes->res_type & ResAccMask) {
1070	case ResExclusive:
1071	    /* ISA buses are only locally exclusive on a PCI system */
1072	    if (loc == BUS_PCI && r_loc == BUS_ISA)
1073		return FALSE;
1074	    break;
1075	case ResShared:
1076	    return FALSE;
1077	}
1078	break;
1079    case ResAny:
1080	break;
1081    }
1082
1083    if (pRes->entityIndex == entityIndex) return FALSE;
1084
1085    if (pRes->entityIndex > -1 &&
1086	(pScrn = xf86FindScreenForEntity(entityIndex))) {
1087	for (i = 0; i < pScrn->numEntities; i++)
1088	    if (pScrn->entityList[i] == pRes->entityIndex) return FALSE;
1089    }
1090    return TRUE;
1091}
1092
1093/*
1094 * checkConflict() - main conflict checking function which all other
1095 * functions call.
1096 */
1097static memType
1098checkConflict(resRange *rgp, resPtr pRes, int entityIndex,
1099	      xf86State state, Bool ignoreIdentical)
1100{
1101    memType ret;
1102
1103    while(pRes) {
1104	if (!needCheck(pRes,rgp->type, entityIndex ,state)) {
1105	    pRes = pRes->next;
1106	    continue;
1107	}
1108	switch (rgp->type & ResExtMask) {
1109	case ResBlock:
1110	    if (rgp->rEnd < rgp->rBegin) {
1111		xf86Msg(X_ERROR,"end of block range 0x%lx < begin 0x%lx\n",
1112			rgp->rEnd,rgp->rBegin);
1113		return 0;
1114	    }
1115	    if ((ret = checkConflictBlock(rgp, pRes))) {
1116		if (!ignoreIdentical || (rgp->rBegin != pRes->block_begin)
1117		    || (rgp->rEnd != pRes->block_end))
1118		    return ret;
1119	    }
1120	    break;
1121	case ResSparse:
1122	    if ((rgp->rBase & rgp->rMask) != rgp->rBase) {
1123		xf86Msg(X_ERROR,"sparse io range (base: 0x%lx  mask: 0x%lx)"
1124			"doesn't satisfy (base & mask = mask)\n",
1125			rgp->rBase, rgp->rMask);
1126		return 0;
1127	    }
1128	    if ((ret = checkConflictSparse(rgp, pRes))) {
1129		if (!ignoreIdentical || (rgp->rBase != pRes->sparse_base)
1130		    || (rgp->rMask != pRes->sparse_mask))
1131		    return ret;
1132	    }
1133	    break;
1134	}
1135	pRes = pRes->next;
1136    }
1137    return 0;
1138}
1139
1140/*
1141 * ChkConflict() -- used within xxxBus ; find conflict with any location.
1142 */
1143memType
1144ChkConflict(resRange *rgp, resPtr res, xf86State state)
1145{
1146    return checkConflict(rgp, res, -2, state,FALSE);
1147}
1148
1149/*
1150 * xf86ChkConflict() - This function is the low-level interface to
1151 * the resource broker that gets exported. It tests against all
1152 * registered resources, i.e. it performs the test with the SETUP flag.
1153 */
1154_X_EXPORT memType
1155xf86ChkConflict(resRange *rgp, int entityIndex)
1156{
1157    return checkConflict(rgp, Acc, entityIndex, SETUP,FALSE);
1158}
1159
1160/*
1161 * Resources List handling
1162 */
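/*
 * A minimal sketch of building a list with xf86AddResToList() (below).
 * The addresses are made up; RANGE() and the ResShrMemBlock flag are
 * assumed to come from xf86Resources.h:
 *
 *     resRange r;
 *     resPtr list = NULL;
 *     RANGE(r, 0xa0000, 0xaffff, ResShrMemBlock);
 *     list = xf86AddResToList(list, &r, entityIndex);
 */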
1163
1164_X_EXPORT resPtr
1165xf86JoinResLists(resPtr rlist1, resPtr rlist2)
1166{
1167    resPtr pRes;
1168
1169    if (!rlist1)
1170	return rlist2;
1171
1172    if (!rlist2)
1173	return rlist1;
1174
1175    for (pRes = rlist1; pRes->next; pRes = pRes->next)
1176	;
1177    pRes->next = rlist2;
1178    return rlist1;
1179}
1180
1181_X_EXPORT resPtr
1182xf86AddResToList(resPtr rlist, resRange *range, int entityIndex)
1183{
1184    resPtr new;
1185
1186    switch (range->type & ResExtMask) {
1187    case ResBlock:
1188	if (range->rEnd < range->rBegin) {
1189		xf86Msg(X_ERROR,"end of block range 0x%lx < begin 0x%lx\n",
1190			range->rEnd,range->rBegin);
1191		return rlist;
1192	}
1193	break;
1194    case ResSparse:
1195	if ((range->rBase & range->rMask) != range->rBase) {
1196	    xf86Msg(X_ERROR,"sparse io range (base: 0x%lx  mask: 0x%lx)"
1197		    "doesn't satisfy (base & mask = mask)\n",
1198		    range->rBase, range->rMask);
1199	    return rlist;
1200	}
1201	break;
1202    }
1203
1204    new = xnfalloc(sizeof(resRec));
1205    /*
1206     * Only background resources may be registered with ResBios
1207     * and ResEstimated set. Other resources only set it for
1208     * testing.
1209     */
1210    if (entityIndex != (-1))
1211        range->type &= ~(ResBios | ResEstimated);
1212    new->val = *range;
1213    new->entityIndex = entityIndex;
1214    new->next = rlist;
1215    return new;
1216}
1217
1218_X_EXPORT void
1219xf86FreeResList(resPtr rlist)
1220{
1221    resPtr pRes;
1222
1223    if (!rlist)
1224	return;
1225
1226    for (pRes = rlist->next; pRes; rlist = pRes, pRes = pRes->next)
1227	xfree(rlist);
1228    xfree(rlist);
1229}
1230
1231_X_EXPORT resPtr
1232xf86DupResList(const resPtr rlist)
1233{
1234    resPtr pRes, ret, prev, new;
1235
1236    if (!rlist)
1237	return NULL;
1238
1239    ret = xnfalloc(sizeof(resRec));
1240    *ret = *rlist;
1241    prev = ret;
1242    for (pRes = rlist->next; pRes; pRes = pRes->next) {
1243	new = xnfalloc(sizeof(resRec));
1244	*new = *pRes;
1245	prev->next = new;
1246	prev = new;
1247    }
1248    return ret;
1249}
1250
1251_X_EXPORT void
1252xf86PrintResList(int verb, resPtr list)
1253{
1254    int i = 0;
1255    const char *s, *r;
1256    resPtr tmp = list;
1257    unsigned long type;
1258
1259    if (!list)
1260	return;
1261
1262    type = ResMem;
1263    r = "M";
1264    while (1) {
1265	while (list) {
1266	    if ((list->res_type & ResPhysMask) == type) {
1267		switch (list->res_type & ResExtMask) {
1268		case ResBlock:
1269		    xf86ErrorFVerb(verb,
1270				   "\t[%d] %d\t%ld\t0x%08lx - 0x%08lx (0x%lx)",
1271				   i, list->entityIndex,
1272				   (list->res_type & ResDomain) >> 24,
1273				   list->block_begin, list->block_end,
1274				   list->block_end - list->block_begin + 1);
1275		    break;
1276		case ResSparse:
1277		    xf86ErrorFVerb(verb, "\t[%d] %d\t%ld\t0x%08lx - 0x%08lx ",
1278				   i, list->entityIndex,
1279				   (list->res_type & ResDomain) >> 24,
1280				   list->sparse_base,list->sparse_mask);
1281		    break;
1282		default:
1283		    list = list->next;
1284		    continue;
1285		}
1286		xf86ErrorFVerb(verb, " %s", r);
1287		switch (list->res_type & ResAccMask) {
1288		case ResExclusive:
1289		    if (list->res_type & ResUnused)
1290			s = "x";
1291		    else
1292			s = "X";
1293		    break;
1294		case ResShared:
1295		    if (list->res_type & ResUnused)
1296			s = "s";
1297		    else
1298			s = "S";
1299		    break;
1300		default:
1301		    s = "?";
1302		}
1303		xf86ErrorFVerb(verb, "%s", s);
1304		switch (list->res_type & ResExtMask) {
1305		case ResBlock:
1306		    s = "[B]";
1307		    break;
1308		case ResSparse:
1309		    s = "[S]";
1310		    break;
1311		default:
1312		    s = "[?]";
1313		}
1314		xf86ErrorFVerb(verb, "%s", s);
1315		if (list->res_type & ResEstimated)
1316		    xf86ErrorFVerb(verb, "E");
1317		if (list->res_type & ResOverlap)
1318		    xf86ErrorFVerb(verb, "O");
1319		if (list->res_type & ResInit)
1320		    xf86ErrorFVerb(verb, "t");
1321		if (list->res_type & ResBios)
1322		    xf86ErrorFVerb(verb, "(B)");
1323		if (list->res_type & ResBus)
1324		    xf86ErrorFVerb(verb, "(b)");
1325		if (list->res_type & ResOprMask) {
1326		    switch (list->res_type & ResOprMask) {
1327		    case ResUnusedOpr:
1328			s = "(OprU)";
1329			break;
1330		    case ResDisableOpr:
1331			s = "(OprD)";
1332			break;
1333		    default:
1334			s = "(Opr?)";
1335			break;
1336		    }
1337		    xf86ErrorFVerb(verb, "%s", s);
1338		}
1339		xf86ErrorFVerb(verb, "\n");
1340		i++;
1341	    }
1342	    list = list->next;
1343	}
1344	if (type == ResIo) break;
1345	type = ResIo;
1346	r = "I";
1347	list = tmp;
1348    }
1349}
1350
1351resPtr
1352xf86AddRangesToList(resPtr list, resRange *pRange, int entityIndex)
1353{
1354    while(pRange && pRange->type != ResEnd) {
1355	list = xf86AddResToList(list,pRange,entityIndex);
1356	pRange++;
1357    }
1358    return list;
1359}
1360
1361void
1362xf86ResourceBrokerInit(void)
1363{
1364    resPtr resPci;
1365
1366    osRes = NULL;
1367
1368    /* Get the addressable ranges */
1369    ResRange = xf86BusAccWindowsFromOS();
1370    xf86MsgVerb(X_INFO, 3, "Addressable bus resource ranges are\n");
1371    xf86PrintResList(3, ResRange);
1372
1373    /* Get the ranges used exclusively by the system */
1374    osRes = xf86AccResFromOS(osRes);
1375    xf86MsgVerb(X_INFO, 3, "OS-reported resource ranges:\n");
1376    xf86PrintResList(3, osRes);
1377
1378    /* Bus dep initialization */
1379    resPci = ResourceBrokerInitPci(&osRes);
1380    Acc = xf86JoinResLists(xf86DupResList(osRes), resPci);
1381
1382    xf86MsgVerb(X_INFO, 3, "All system resource ranges:\n");
1383    xf86PrintResList(3, Acc);
1384
1385}
1386
1387#define MEM_ALIGN (1024 * 1024)
1388
1389/*
1390 * RemoveOverlaps() -- remove overlaps between resources of the
1391 * same kind.
1392 * Beware: This function doesn't check for access attributes.
1393 * At resource broker initialization this is no problem, as it
1394 * only deals with exclusive resources.
1395 */
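/*
 * For example (made-up addresses): an estimated target of
 * 0xe0000000-0xefffffff that overlaps a known range starting at
 * 0xe8000000 is trimmed to end at 0xe7ffffff, and then aligned as
 * requested via pow2Alignment or MEM_ALIGN.
 */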
1396#if 0
1397void
1398RemoveOverlaps(resPtr target, resPtr list, Bool pow2Alignment, Bool useEstimated)
1399{
1400    resPtr pRes;
1401    memType size, newsize, adjust;
1402
1403    if (!target)
1404	return;
1405
1406    for (pRes = list; pRes; pRes = pRes->next) {
1407	if (pRes != target
1408	    && ((pRes->res_type & ResTypeMask) ==
1409		(target->res_type & ResTypeMask))
1410	    && pRes->block_begin <= target->block_end
1411	    && pRes->block_end >= target->block_begin) {
1412	    /* Possibly ignore estimated resources */
1413	    if (!useEstimated && (pRes->res_type & ResEstimated)) continue;
1414	    /*
1415	     * Target should be a larger region than pRes.  If pRes fully
1416	     * contains target, don't do anything unless target can overlap.
1417	     */
1418	    if (pRes->block_begin <= target->block_begin &&
1419		pRes->block_end >= target->block_end) {
1420		if (target->res_type & ResOverlap) {
1421		    /* Nullify range but keep its ResOverlap bit on */
1422		    target->block_end = target->block_begin - 1;
1423		    return;
1424		}
1425		continue;
1426	    }
1427	    /*
1428	     * In cases where the target and pRes have the same starting
1429	     * address, reduce the size of the target (given it's an estimate).
1430	     */
1431	    if (pRes->block_begin == target->block_begin) {
1432		if (target->res_type & ResOverlap)
1433		    target->block_end = target->block_begin - 1;
1434		else
1435		    target->block_end = pRes->block_end;
1436	    }
1437	    /* Otherwise, trim target to remove the overlap */
1438	    else if (pRes->block_begin <= target->block_end) {
1439		target->block_end = pRes->block_begin - 1;
1440	    } else if (!pow2Alignment &&
1441		       pRes->block_end >= target->block_begin) {
1442		target->block_begin = pRes->block_end + 1;
1443	    }
1444	    if (pow2Alignment) {
1445		/*
1446		 * Align to a power of two.  This requires finding the
1447		 * largest power of two that is smaller than the adjusted
1448		 * size.
1449		 */
1450		size = target->block_end - target->block_begin + 1;
1451		newsize = 1UL << (sizeof(memType) * 8 - 1);
1452		while (!(newsize & size))
1453		    newsize >>= 1;
1454		target->block_end = target->block_begin + newsize - 1;
1455	    } else if (target->block_end > MEM_ALIGN) {
1456		/* Align the end to MEM_ALIGN */
1457		if ((adjust = (target->block_end + 1) % MEM_ALIGN))
1458		    target->block_end -= adjust;
1459	    }
1460	}
1461    }
1462}
1463#else
1464
1465void
1466RemoveOverlaps(resPtr target, resPtr list, Bool pow2Alignment, Bool useEstimated)
1467{
1468    resPtr pRes;
1469    memType size, newsize, adjust;
1470
1471    if (!target)
1472	return;
1473
1474    if (!(target->res_type & ResEstimated)   /* Don't touch sure resources */
1475	&& !(target->res_type & ResOverlap)) /* Unless they may overlap    */
1476	return;
1477
1478    for (pRes = list; pRes; pRes = pRes->next) {
1479	if (pRes == target
1480	    || ((pRes->res_type & ResTypeMask) !=
1481		(target->res_type & ResTypeMask))
1482	    || pRes->block_begin > target->block_end
1483	    || pRes->block_end < target->block_begin)
1484	    continue;
1485
1486	if (pRes->block_begin <= target->block_begin) {
1487	    /* Possibly ignore estimated resources */
1488	    if (!useEstimated && (pRes->res_type & ResEstimated))
1489		continue;
1490
1491	    /* Special cases */
1492	    if (pRes->block_end >= target->block_end) {
1493		/*
1494		 * If pRes fully contains target, don't do anything
1495		 * unless target can overlap.
1496		 */
1497		if (target->res_type & ResOverlap) {
1498		    /* Nullify range but keep its ResOverlap bit on */
1499		    target->block_end = target->block_begin - 1;
1500		    return;
1501		} else
1502		    continue;
1503	    } else {
1504#if 0 /* Don't trim start address - we trust what we got */
1505		/*
1506		 * If !pow2Alignment, trim the start address: !pow2Alignment
1507		 * is only set when estimated OS addresses are handled.
1508		 * In cases where the target and pRes have the same
1509		 * starting address, reduce the size of the target
1510		 * (given it's an estimate).
1511		 */
1512		if (!pow2Alignment)
1513		    target->block_begin = pRes->block_end + 1;
1514		else
1515#endif
1516		if (pRes->block_begin == target->block_begin)
1517		    target->block_end = pRes->block_end;
1518		else
1519		    continue;
1520	    }
1521	} else {
1522	    /* Trim target to remove the overlap */
1523		target->block_end = pRes->block_begin - 1;
1524	}
1525	if (pow2Alignment) {
1526	    /*
1527	     * Align to a power of two.  This requires finding the
1528	     * largest power of two that is smaller than the adjusted
1529	     * size.
1530	     */
1531	    size = target->block_end - target->block_begin + 1;
1532	    newsize = 1UL << (sizeof(memType) * 8 - 1);
1533	    while (!(newsize & size))
1534		newsize >>= 1;
1535	    target->block_end = target->block_begin + newsize - 1;
1536	} else if (target->block_end > MEM_ALIGN) {
1537	    /* Align the end to MEM_ALIGN */
1538	    if ((adjust = (target->block_end + 1) % MEM_ALIGN))
1539		target->block_end -= adjust;
1540	}
1541    }
1542}
1543
1544#endif
1545
1546/*
1547 * Resource request code
1548 */
1549
1550#define ALIGN(x,a) (((x) + (a)) & ~(a))	/* round x up to a multiple of (a)+1 */
1551
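/*
 * xf86GetBlock() -- search the addressable ranges (ResRange) for a block
 * of the given size and type inside [window_start,window_end] that
 * conflicts neither with the registered resources (Acc) nor with the
 * caller-supplied "avoid" list; a range of type ResEnd is returned when
 * nothing suitable is found.
 */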
1552_X_EXPORT resRange
1553xf86GetBlock(unsigned long type, memType size,
1554	 memType window_start, memType window_end,
1555	 memType align_mask, resPtr avoid)
1556{
1557    memType min, max, tmp;
1558    resRange r = {ResEnd,0,0};
1559    resPtr res_range = ResRange;
1560
1561    if (!size) return r;
1562    if (window_end < window_start || (window_end - window_start) < (size - 1)) {
1563	ErrorF("Requesting insufficient memory window!:"
1564	       " start: 0x%lx end: 0x%lx size 0x%lx\n",
1565	       window_start,window_end,size);
1566	return r;
1567    }
1568    type = (type & ~(ResExtMask | ResBios | ResEstimated)) | ResBlock;
1569
1570    while (res_range) {
1571	if ((type & ResTypeMask) == (res_range->res_type & ResTypeMask)) {
1572	    if (res_range->block_begin > window_start)
1573		min = res_range->block_begin;
1574	    else
1575		min = window_start;
1576	    if (res_range->block_end < window_end)
1577		max = res_range->block_end;
1578	    else
1579		max = window_end;
1580	    min = ALIGN(min,align_mask);
1581	    /* do not produce an overflow! */
1582	    while (min < max && (max - min) >= (size - 1)) {
1583		RANGE(r,min,min + size - 1,type);
1584		tmp = ChkConflict(&r,Acc,SETUP);
1585		if (!tmp) {
1586		    tmp = ChkConflict(&r,avoid,SETUP);
1587		    if (!tmp) {
1588			return r;
1589		    }
1590		}
1591		min = ALIGN(tmp,align_mask);
1592	    }
1593	}
1594	res_range = res_range->next;
1595    }
1596    RANGE(r,0,0,ResEnd);
1597    return r;
1598}
1599
1600#define mt_max ~(memType)0
1601#define length sizeof(memType) * 8
1602/*
1603 * make_base() -- assign the lowest bits to the bits set in mask.
1604 *                example: mask 011010 val 0000110 -> 011000
1605 */
1606static memType
1607make_base(memType val, memType mask)
1608{
1609    int i,j = 0;
1610    memType ret = 0;
1612    for (i = 0;i<length;i++) {
1613	if ((1 << i) & mask) {
1614	    ret |= (((val >> j) & 1) << i);
1615	    j++;
1616	}
1617    }
1618    return ret;
1619}
1620
1621/*
1622 * unmake_base() -- gather the bits of val at the mask positions into the lowest bits.
1623 *                  example: mask 011010 , val 010010 -> 000101
1624 */
1625static memType
1626unmake_base(memType val, memType mask)
1627{
1628    int i,j = 0;
1629    memType ret = 0;
1630
1631    for (i = 0;i<length;i++) {
1632	if ((1 << i) & mask) {
1633	    ret |= (((val >> i) & 1) << j);
1634	    j++;
1635	}
1636    }
1637    return ret;
1638}
1639
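/*
 * fix_counter() -- re-express a counter value: spread val over the bit
 * positions of old_mask and then collect only those bits that are also
 * set in mask, so that iteration can continue after the usable mask has
 * been reduced.
 */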
1640static memType
1641fix_counter(memType val, memType old_mask, memType mask)
1642{
1643    mask = old_mask & mask;
1644
1645    val = make_base(val,old_mask);
1646    return unmake_base(val,mask);
1647}
1648
1649_X_EXPORT resRange
1650xf86GetSparse(unsigned long type,  memType fixed_bits,
1651	  memType decode_mask, memType address_mask, resPtr avoid)
1652{
1653    resRange r = {ResEnd,0,0};
1654    memType new_mask;
1655    memType mask1;
1656    memType base;
1657    memType counter = 0;
1658    memType counter1;
1659    memType max_counter = ~(memType)0;
1660    memType max_counter1;
1661    memType conflict = 0;
1662
1663    /* for sanity */
1664    type = (type & ~(ResExtMask | ResBios | ResEstimated)) | ResSparse;
1665
1666    /*
1667     * a sparse address consists of 3 parts:
1668     * fixed_bits:   F bits which are hard-decoded by the hardware
1669     * decode_bits:  D bits which are used to decode the address
1670     *                 but which may be set by software
1671     * address_bits: A bits which are used to address the
1672     *                 sparse range.
1673     * the decode_mask marks all decode bits while the address_mask
1674     * masks out all address_bits:
1675     *                F D A
1676     * decode_mask:   0 1 0
1677     * address_mask:  1 1 0
1678     */
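    /*
     * For example (values made up for illustration): with
     * fixed_bits = 0x300, decode_mask = 0xc00 and address_mask = 0xfc00,
     * the loop below tries at most the bases 0x300, 0x700, 0xb00 and
     * 0xf00 before this part of the search is exhausted.
     */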
1679    decode_mask &= address_mask;
1680    new_mask = decode_mask;
1681
1682    /*
1683     * We start by setting the decode_mask bits to different values.
1684     * When a conflict is found, the address_mask of the conflicting
1685     * resource is returned. We remove those bits from decode_mask
1686     * that are also set in the returned address_mask, as they always
1687     * conflict with resources which use them as address masks.
1688     * The resulting mask is stored in new_mask.
1689     * We continue until no conflict is found or until we have
1690     * tried all possible settings of new_mask.
1691     */
1692    while (1) {
1693	base = make_base(counter,new_mask) | fixed_bits;
1694	RANGE(r,base,address_mask,type);
1695	conflict = ChkConflict(&r,Acc,SETUP);
1696	if (!conflict) {
1697	    conflict = ChkConflict(&r,avoid,SETUP);
1698	    if (!conflict) {
1699		return r;
1700	    }
1701	}
1702	counter = fix_counter(counter,new_mask,conflict);
1703	max_counter = fix_counter(max_counter,new_mask,conflict);
1704	new_mask &= conflict;
1705	counter ++;
1706	if (counter > max_counter) break;
1707    }
1708    if (!new_mask && (new_mask == decode_mask)) {
1709	RANGE(r,0,0,ResEnd);
1710	return r;
1711    }
1712    /*
1713     * if we haven't been successful we also try to modify those
1714     * bits in decode_mask that are not at the same time set in
1715     * new mask. These bits overlap with address_bits of some
1716     * resources. If a conflict with a resource of this kind is
1717     * found (ie. returned_mask & mask1 != mask1) with
1718     * mask1 = decode_mask & ~new_mask we cannot
1719     * use our choice of bits in the new_mask part. We try
1720     * another choice.
1721     */
1722    max_counter = fix_counter(mt_max,mt_max,new_mask);
1723    mask1 = decode_mask & ~new_mask;
1724    max_counter1 = fix_counter(mt_max,mt_max,mask1);
1725    counter = 0;
1726
1727    while (1) {
1728	counter1 = 0;
1729	while (1) {
1730	    base = make_base(counter1,mask1);
1731	    RANGE(r,base,address_mask,type);
1732	    conflict = ChkConflict(&r,Acc,SETUP);
1733	    if (!conflict) {
1734		conflict = ChkConflict(&r,avoid,SETUP);
1735		if (!conflict) {
1736		    return r;
1737		}
1738	    }
1739	    counter1 ++;
1740	    if ((mask1 & conflict) != mask1 || counter1 > max_counter1)
1741		break;
1742	}
1743	counter ++;
1744	if (counter > max_counter) break;
1745    }
1746    RANGE(r,0,0,ResEnd);
1747    return r;
1748}
1749
1750#undef length
1751#undef mt_max
1752
1753/*
1754 * Resource registrarion
1755 */
1756
1757static resList
1758xf86GetResourcesImplicitly(int entityIndex)
1759{
1760    if (entityIndex >= xf86NumEntities) return NULL;
1761
1762    switch (xf86Entities[entityIndex]->bus.type) {
1763    case BUS_ISA:
1764    case BUS_NONE:
1765    case BUS_SBUS:
1766	return NULL;
1767    case BUS_PCI:
1768	return GetImplicitPciResources(entityIndex);
1769    case BUS_last:
1770	return NULL;
1771    }
1772    return NULL;
1773}
1774
1775static void
1776convertRange2Host(int entityIndex, resRange *pRange)
1777{
1778    if (pRange->type & ResBus) {
1779	switch (xf86Entities[entityIndex]->busType) {
1780	case BUS_PCI:
1781	    pciConvertRange2Host(entityIndex,pRange);
1782	    break;
1783	case BUS_ISA:
1784	    isaConvertRange2Host(pRange);
1785	    break;
1786	default:
1787	    break;
1788	}
1789
1790	pRange->type &= ~ResBus;
1791    }
1792}
1793
1794static void
1795xf86ConvertListToHost(int entityIndex, resPtr list)
1796{
1797    while (list) {
1798	convertRange2Host(entityIndex, &list->val);
1799	list = list->next;
1800    }
1801}
1802
1803/*
1804 * xf86RegisterResources() -- attempts to register listed resources.
1805 * If list is NULL it tries to obtain resources implicitly. Function
1806 * returns a resPtr listing all resources not successfully registered.
1807 */
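/*
 * Hypothetical driver-side sketch (the surrounding names are made up;
 * only the xf86* calls are from this file): a driver typically registers
 * its decoded ranges during PreInit and treats a non-NULL return as a
 * conflict, e.g.
 *
 *     resPtr unused = xf86RegisterResources(pEnt->index, NULL, ResExclusive);
 *     if (unused) {
 *         xf86FreeResList(unused);
 *         return FALSE;
 *     }
 */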
1808
1809_X_EXPORT resPtr
1810xf86RegisterResources(int entityIndex, resList list, unsigned long access)
1811{
1812    resPtr res = NULL;
1813    resRange range;
1814    resList list_f = NULL;
1815
1816    if (!list) {
1817	list = xf86GetResourcesImplicitly(entityIndex);
1818	/* these resources have to be in host address space already */
1819	if (!list) return NULL;
1820	list_f = list;
1821    }
1822
1823    while(list->type != ResEnd) {
1824	range = *list;
1825
1826	convertRange2Host(entityIndex,&range);
1827
1828	if ((access != ResNone) && (access & ResAccMask)) {
1829	    range.type = (range.type & ~ResAccMask) | (access & ResAccMask);
1830	}
1831 	range.type &= ~ResEstimated;	/* Not allowed for drivers */
1832#if !((defined(__alpha__) || (defined(__ia64__))) && defined(linux))
1833	/* On Linux/Alpha and Linux/ia64, do not check for conflicts; trust the kernel. */
1834	if (checkConflict(&range, Acc, entityIndex, SETUP,TRUE))
1835	    res = xf86AddResToList(res,&range,entityIndex);
1836	else
1837#endif
1838	{
1839	    Acc = xf86AddResToList(Acc,&range,entityIndex);
1840	}
1841	list++;
1842    }
1843    if (list_f)
1844      xfree(list_f);
1845
1846#ifdef DEBUG
1847    xf86MsgVerb(X_INFO, 3,"Resources after driver initialization\n");
1848    xf86PrintResList(3, Acc);
1849    if (res) xf86MsgVerb(X_INFO, 3,
1850			 "Failed Resources after driver initialization "
1851			 "for Entity: %i\n",entityIndex);
1852    xf86PrintResList(3, res);
1853#endif
1854    return res;
1855
1856}
1857
1858static void
1859busTypeSpecific(EntityPtr pEnt, xf86State state, xf86AccessPtr *acc_mem,
1860		xf86AccessPtr *acc_io, xf86AccessPtr *acc_mem_io)
1861{
1862    pciAccPtr *ppaccp;
1863
1864    switch (pEnt->bus.type) {
1865    case BUS_ISA:
1866    case BUS_SBUS:
1867	*acc_mem = *acc_io = *acc_mem_io = &AccessNULL;
1868	break;
1870    case BUS_PCI:
1871	ppaccp = xf86PciAccInfo;
1872	while (*ppaccp) {
1873	    if ((*ppaccp)->busnum == pEnt->pciBusId.bus
1874		&& (*ppaccp)->devnum == pEnt->pciBusId.device
1875		&& (*ppaccp)->funcnum == pEnt->pciBusId.func) {
1876		*acc_io = &(*ppaccp)->ioAccess;
1877		*acc_mem = &(*ppaccp)->memAccess;
1878		*acc_mem_io = &(*ppaccp)->io_memAccess;
1879		break;
1880	    }
1881	    ppaccp++;
1882	}
1883	break;
1884    default:
1885	*acc_mem = *acc_io = *acc_mem_io = NULL;
1886	break;
1887    }
1888    return;
1889}
1890
1891static void
1892setAccess(EntityPtr pEnt, xf86State state)
1893{
1894
1895    xf86AccessPtr acc_mem, acc_io, acc_mem_io;
1896    xf86AccessPtr org_mem = NULL, org_io = NULL, org_mem_io = NULL;
1897    int prop;
1898
1899    busTypeSpecific(pEnt,state,&acc_mem,&acc_io,&acc_mem_io);
1900
1901    /* The replacement function needs to handle _all_ shared resources */
1902    /* unless they are handled locally and disabled otherwise          */
1903    if (pEnt->rac) {
1904	if (pEnt->rac->io_new) {
1905	    org_io = acc_io;
1906	    acc_io = pEnt->rac->io_new;
1907	}
1908	if (pEnt->rac->mem_new) {
1909	    org_mem = acc_mem;
1910	    acc_mem = pEnt->rac->mem_new;
1911	}
1912	if (pEnt->rac->io_mem_new) {
1913	    org_mem_io = acc_mem_io;
1914	    acc_mem_io = pEnt->rac->io_mem_new;
1915	}
1916    }
1917
1918    if (state == OPERATING) {
1919	prop = pEnt->entityProp;
1920	switch(pEnt->entityProp & NEED_SHARED) {
1921	case NEED_SHARED:
1922	    pEnt->access->rt = MEM_IO;
1923	    break;
1924	case NEED_IO_SHARED:
1925	    pEnt->access->rt = IO;
1926	    break;
1927	case NEED_MEM_SHARED:
1928	    pEnt->access->rt = MEM;
1929	    break;
1930	default:
1931	    pEnt->access->rt = NONE;
1932	}
1933    } else {
1934	prop = NEED_SHARED | NEED_MEM | NEED_IO;
1935	pEnt->access->rt = MEM_IO;
1936    }
1937
1938    switch(pEnt->access->rt) {
1939    case IO:
1940	pEnt->access->pAccess = acc_io;
1941	break;
1942    case MEM:
1943	pEnt->access->pAccess = acc_mem;
1944	break;
1945    case MEM_IO:
1946	pEnt->access->pAccess = acc_mem_io;
1947	break;
1948    default: /* no conflicts at all */
1949	pEnt->access->pAccess =  NULL; /* remove from RAC */
1950	break;
1951    }
1952
1953    if (org_io) {
1954	/* does the driver want the old access func? */
1955	if (pEnt->rac->old) {
1956	    /* give it to the driver, leave state disabled */
1957	    pEnt->rac->old->io = org_io;
1958	} else if (org_io->AccessEnable) {
1959	    /* driver doesn't want it - enable generic access */
1960	    org_io->AccessEnable(org_io->arg);
1961	}
1962    }
1963
1964    if (org_mem_io) {
1965	/* does the driver want the old access func? */
1966	if (pEnt->rac->old) {
1967	    /* give it to the driver, leave state disabled */
1968	    pEnt->rac->old->io_mem = org_mem_io;
1969	} else if (org_mem_io->AccessEnable) {
1970	    /* driver doesn't want it - enable generic access */
1971	    org_mem_io->AccessEnable(org_mem_io->arg);
1972	}
1973    }
1974
1975    if (org_mem) {
1976	/* does the driver want the old access func? */
1977	if (pEnt->rac->old) {
1978	    /* give it to the driver, leave state disabled */
1979	    pEnt->rac->old->mem = org_mem;
1980	} else if (org_mem->AccessEnable) {
1981	    /* driver doesn't want it - enable generic access */
1982	    org_mem->AccessEnable(org_mem->arg);
1983	}
1984    }
1985
1986    if (!(prop & NEED_MEM_SHARED)){
1987	if (prop & NEED_MEM) {
1988	    if (acc_mem && acc_mem->AccessEnable)
1989		acc_mem->AccessEnable(acc_mem->arg);
1990	} else {
1991	    if (acc_mem && acc_mem->AccessDisable)
1992		acc_mem->AccessDisable(acc_mem->arg);
1993	}
1994    }
1995
1996    if (!(prop & NEED_IO_SHARED)) {
1997	if (prop & NEED_IO) {
1998	    if (acc_io && acc_io->AccessEnable)
1999		acc_io->AccessEnable(acc_io->arg);
2000	} else {
2001	    if (acc_io && acc_io->AccessDisable)
2002		acc_io->AccessDisable(acc_io->arg);
2003	}
2004    }
2005
2006    /* disable shared resources */
2007    if (pEnt->access->pAccess
2008	&& pEnt->access->pAccess->AccessDisable)
2009	pEnt->access->pAccess->AccessDisable(pEnt->access->pAccess->arg);
2010
2011    /*
2012     * If the device is not under access control it is enabled.
2013     * If it needs bus routing, do it here since that isn't bus-type
2014     * specific. Any conflicts should have been checked by this
2015     * stage.
2016     */
2017    if (!pEnt->access->pAccess
2018	&& (pEnt->entityProp & (state == SETUP ? NEED_VGA_ROUTED_SETUP :
2019				NEED_VGA_ROUTED)))
2020	((BusAccPtr)pEnt->busAcc)->set_f(pEnt->busAcc);
2021}
2022
2023
2024/*
2025 * xf86EnterServerState() -- set state the server is in.
2026 */
2027
2028typedef enum { TRI_UNSET, TRI_TRUE, TRI_FALSE } TriState;
2029
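/*
 * SetSIGIOForState() -- block SIGIO while the server is in SETUP state and
 * unblock it again for OPERATING state.  A tri-state flag makes sure that
 * xf86BlockSIGIO()/xf86UnblockSIGIO() are not called twice in a row.
 */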
2030static void
2031SetSIGIOForState(xf86State state)
2032{
2033    static int sigio_state;
2034    static TriState sigio_blocked = TRI_UNSET;
2035
2036    if ((state == SETUP) && (sigio_blocked != TRI_TRUE)) {
2037        sigio_state = xf86BlockSIGIO();
2038	sigio_blocked = TRI_TRUE;
2039    } else if ((state == OPERATING) && (sigio_blocked != TRI_UNSET)) {
2040        xf86UnblockSIGIO(sigio_state);
2041        sigio_blocked = TRI_FALSE;
2042    }
2043}
2044
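/*
 * Rough usage sketch (hypothetical call sites; the OPERATING transition in
 * xf86PostScreenInit() below is one real example from this file):
 *
 *	xf86EnterServerState(SETUP);
 *	... probe or reprogram the hardware ...
 *	xf86EnterServerState(OPERATING);
 */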
2045_X_EXPORT void
2046xf86EnterServerState(xf86State state)
2047{
2048    EntityPtr pEnt;
2049    ScrnInfoPtr pScrn;
2050    int i,j;
2051    int needVGA = 0;
2052    resType rt;
2053    /*
2054     * This is a good place to block SIGIO during SETUP state.
2055     * SIGIO should be blocked in SETUP state otherwise (u)sleep()
2056     * might get interrupted early.
2057     * We take care not to call xf86BlockSIGIO() twice.
2058     */
2059    SetSIGIOForState(state);
2060#ifdef DEBUG
2061    if (state == SETUP)
2062	ErrorF("Entering SETUP state\n");
2063    else
2064	ErrorF("Entering OPERATING state\n");
2065#endif
2066
2067    /* When servicing a dumb framebuffer we don't need to do anything */
2068    if (doFramebufferMode) return;
2069
2070    for (i=0; i<xf86NumScreens; i++) {
2071	pScrn = xf86Screens[i];
2072	j = pScrn->entityList[pScrn->numEntities - 1];
2073	pScrn->access = xf86Entities[j]->access;
2074
2075 	for (j = 0; j<xf86Screens[i]->numEntities; j++) {
2076 	    pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
2077 	    if (pEnt->entityProp & (state == SETUP ? NEED_VGA_ROUTED_SETUP
2078 				    : NEED_VGA_ROUTED))
2079		xf86Screens[i]->busAccess = pEnt->busAcc;
2080 	}
2081	if (xf86Screens[i]->busAccess)
2082	    needVGA ++;
2083    }
2084
2085    /*
2086     * If RAC isn't needed (e.g. there is only one screen),
2087     * just enable the screen and return.
2088     */
2089    if (!needRAC) {
2090	xf86EnableAccess(xf86Screens[0]);
2091	notifyStateChange(NOTIFY_ENABLE);
2092	return;
2093    }
2094
2095    if (state == SETUP)
2096	notifyStateChange(NOTIFY_SETUP_TRANSITION);
2097    else
2098	notifyStateChange(NOTIFY_OPERATING_TRANSITION);
2099
2100    clearAccess();
2101    for (i=0; i<xf86NumScreens;i++) {
2102
2103	rt = NONE;
2104
2105	for (j = 0; j<xf86Screens[i]->numEntities; j++) {
2106	    pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
2107	    setAccess(pEnt,state);
2108
2109	    if (pEnt->access->rt != NONE) {
2110		if (rt != NONE && rt != pEnt->access->rt)
2111		    rt = MEM_IO;
2112		else
2113		    rt = pEnt->access->rt;
2114	    }
2115	}
2116	xf86Screens[i]->resourceType = rt;
2117	if (rt == NONE) {
2118	    xf86Screens[i]->access = NULL;
2119	    if (needVGA < 2)
2120		xf86Screens[i]->busAccess = NULL;
2121	}
2122
2123#ifdef DEBUG
2124	if (xf86Screens[i]->busAccess)
2125	    ErrorF("Screen %i setting vga route\n",i);
2126#endif
2127	switch (rt) {
2128	case MEM_IO:
2129	    xf86MsgVerb(X_INFO, 3, "Screen %i shares mem & io resources\n",i);
2130	    break;
2131	case IO:
2132	    xf86MsgVerb(X_INFO, 3, "Screen %i shares io resources\n",i);
2133	    break;
2134	case MEM:
2135	    xf86MsgVerb(X_INFO, 3, "Screen %i shares mem resources\n",i);
2136	    break;
2137	default:
2138	    xf86MsgVerb(X_INFO, 3, "Screen %i shares no resources\n",i);
2139	    break;
2140	}
2141    }
2142    if (state == SETUP)
2143	notifyStateChange(NOTIFY_SETUP);
2144    else
2145	notifyStateChange(NOTIFY_OPERATING);
2146}
2147
2148/*
2149 * xf86SetOperatingState() -- Set ResOprMask for the resources listed.
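 * Returns a list of the ranges that could not be found in Acc (NULL if all
 * of them were found and updated).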
2150 */
2151_X_EXPORT resPtr
2152xf86SetOperatingState(resList list, int entityIndex, int mask)
2153{
2154    resPtr acc;
2155    resPtr r_fail = NULL;
2156    resRange range;
2157
2158    while (list->type != ResEnd) {
2159	range = *list;
2160	convertRange2Host(entityIndex,&range);
2161
2162	acc = Acc;
2163	while (acc) {
2164#define MASK (ResTypeMask | ResExtMask)
2165	    if ((acc->entityIndex == entityIndex)
2166		&& (acc->val.a == range.a) && (acc->val.b == range.b)
2167		&& ((acc->val.type & MASK) == (range.type & MASK)))
2168		break;
2169#undef MASK
2170	    acc = acc->next;
2171	}
2172	if (acc)
2173	    acc->val.type = (acc->val.type & ~ResOprMask)
2174		| (mask & ResOprMask);
2175	else {
2176	    r_fail = xf86AddResToList(r_fail,&range,entityIndex);
2177	}
2178	list ++;
2179    }
2180
2181     return r_fail;
2182}
2183
2184/*
2185 * Stage specific code
2186 */
2187 /*
2188  * ProcessEstimatedConflicts() -- Do something about driver-registered
2189  * resources that conflict with estimated resources.  For now, just register
2190  * them with a logged warning.
2191  */
2192#ifdef REDUCER
2193static void
2194ProcessEstimatedConflicts(void)
2195{
2196    if (!AccReducers)
2197	return;
2198
2199    /* Temporary */
2200    xf86MsgVerb(X_WARNING, 3,
2201		"Registering the following despite conflicts with estimated"
2202		" resources:\n");
2203    xf86PrintResList(3, AccReducers);
2204    Acc = xf86JoinResLists(Acc, AccReducers);
2205    AccReducers = NULL;
2206}
2207#endif
2208
2209/*
2210 * resError() -- fatal error handler for a fixed resource that a driver
2211 * tried to claim but which conflicts with another resource.
2212 */
2213static void
2214resError(resList list)
2215{
2216    FatalError("A driver tried to allocate the %s %s resource at\n"
2217	       "0x%lx:0x%lx which conflicted with another resource. Send the\n"
2218	       "output of the server to %s. Please\n"
2219	       "specify your computer hardware as closely as possible.\n",
2220	       ResIsBlock(list)?"Block":"Sparse",
2221	       ResIsMem(list)?"Mem":"Io",
2222	       ResIsBlock(list)?list->rBegin:list->rBase,
2223	       ResIsBlock(list)?list->rEnd:list->rMask,BUILDERADDR);
2224}
2225
2226/*
2227 * xf86ClaimFixedResources() is used to allocate non-relocatable resources.
2228 * This should only be done by a driver's Probe() function.
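 *
 * A driver's Probe() would typically pass a statically defined, _END
 * terminated resRange array (the values below are purely illustrative):
 *
 *	static resRange fixed[] = { {ResExcIoBlock, 0x3b0, 0x3bb}, _END };
 *	xf86ClaimFixedResources(fixed, entityIndex);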
2229 */
2230_X_EXPORT void
2231xf86ClaimFixedResources(resList list, int entityIndex)
2232{
2233    resPtr ptr = NULL;
2234    resRange range;
2235
2236    if (!list) return;
2237
2238    while (list->type !=ResEnd) {
2239 	range = *list;
2240
2241	convertRange2Host(entityIndex,&range);
2242
2243 	range.type &= ~ResEstimated;	/* Not allowed for drivers */
2244 	switch (range.type & ResAccMask) {
2245  	case ResExclusive:
2246 	    if (!xf86ChkConflict(&range, entityIndex)) {
2247 		Acc = xf86AddResToList(Acc, &range, entityIndex);
2248#ifdef REDUCER
2249	    } else {
2250 		range.type |= ResEstimated;
2251 		if (!xf86ChkConflict(&range, entityIndex) &&
2252 		    !checkConflict(&range, AccReducers, entityIndex,
2253				   SETUP, FALSE)) {
2254 		    range.type &= ~(ResEstimated | ResBios);
2255 		    AccReducers =
2256 			xf86AddResToList(AccReducers, &range, entityIndex);
2257#endif
2258		} else resError(&range); /* no return */
2259#ifdef REDUCER
2260	    }
2261#endif
2262	    break;
2263	case ResShared:
2264	    /* at this stage the resources are just added to the
2265	     * EntityRec. After the Probe() phase this list is checked by
2266	     * xf86PostProbe(). All resources which don't
2267	     * conflict with already allocated ones are allocated
2268	     * and removed from the EntityRec. Thus a non-empty resource
2269	     * list in the EntityRec indicates resource conflicts the
2270	     * driver should either handle or fail.
2271	     */
2272	    if (xf86Entities[entityIndex]->active)
2273		ptr = xf86AddResToList(ptr,&range,entityIndex);
2274	    break;
2275	}
2276	list++;
2277    }
2278    xf86Entities[entityIndex]->resources =
2279	xf86JoinResLists(xf86Entities[entityIndex]->resources,ptr);
2280    xf86MsgVerb(X_INFO, 3,
2281	"resource ranges after xf86ClaimFixedResources() call:\n");
2282    xf86PrintResList(3,Acc);
2283#ifdef REDUCER
2284    ProcessEstimatedConflicts();
2285#endif
2286#ifdef DEBUG
2287    if (ptr) {
2288	xf86MsgVerb(X_INFO, 3, "to be registered later:\n");
2289	xf86PrintResList(3,ptr);
2290    }
2291#endif
2292}
2293
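/*
 * checkRoutingForScreens() -- find the entities whose registered resources
 * overlap the VGA ranges and mark them as needing VGA routed for the given
 * state (and, when OPERATING, whether VGA MEM and/or IO is involved).  If
 * the entities of one screen would need VGA routed to different buses the
 * screen is deleted.
 */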
2294static void
2295checkRoutingForScreens(xf86State state)
2296{
2297    resList list = resVgaUnusedExclusive;
2298    resPtr pResVGA = NULL;
2299    resPtr pResVGAHost;
2300    pointer vga = NULL;
2301    int i,j;
2302    int entityIndex;
2303    EntityPtr pEnt;
2304    resPtr pAcc;
2305    resRange range;
2306
2307    /*
2308     * Find devices that need VGA routed: i.e. the ones that have
2309     * registered VGA resources without ResUnused. ResUnused
2310     * doesn't conflict with itself, therefore use it here.
2311     */
2312    while (list->type != ResEnd) { /* create resPtr from resList for VGA */
2313	range = *list;
2314	range.type &= ~(ResBios | ResEstimated); /* if set remove them */
2315	pResVGA = xf86AddResToList(pResVGA, &range, -1);
2316	list++;
2317    }
2318
2319    for (i = 0; i < xf86NumScreens; i++) {
2320	for (j = 0; j < xf86Screens[i]->numEntities; j++) {
2321	    entityIndex = xf86Screens[i]->entityList[j];
2322	    pEnt = xf86Entities[entityIndex];
2323	    pAcc = Acc;
2324	    vga = NULL;
2325	    pResVGAHost = xf86DupResList(pResVGA);
2326	    xf86ConvertListToHost(entityIndex,pResVGAHost);
2327	    while (pAcc) {
2328		if (pAcc->entityIndex == entityIndex)
2329		    if (checkConflict(&pAcc->val, pResVGAHost,
2330				      entityIndex, state, FALSE)) {
2331			if (vga && vga != pEnt->busAcc) {
2332			    xf86Msg(X_ERROR, "Screen %i needs vga routed to "
2333				    "different buses - deleting\n",i);
2334			    xf86DeleteScreen(i--,0);
2335			}
2336#ifdef DEBUG
2337			{
2338			    resPtr rlist = xf86AddResToList(NULL,&pAcc->val,
2339							    pAcc->entityIndex);
2340			    xf86MsgVerb(X_INFO,3,"====== %s\n",
2341					state == OPERATING ? "OPERATING"
2342					: "SETUP");
2343			    xf86MsgVerb(X_INFO,3,"%s Resource:\n",
2344					(pAcc->val.type) & ResMem ? "Mem" :"Io");
2345			    xf86PrintResList(3,rlist);
2346			    xf86FreeResList(rlist);
2347			    xf86MsgVerb(X_INFO,3,"Conflicts with:\n");
2348			    xf86PrintResList(3,pResVGAHost);
2349			    xf86MsgVerb(X_INFO,3,"=====\n");
2350			}
2351#endif
2352			vga = pEnt->busAcc;
2353			pEnt->entityProp |= (state == SETUP
2354			    ? NEED_VGA_ROUTED_SETUP : NEED_VGA_ROUTED);
2355			if (state == OPERATING) {
2356			    if (pAcc->val.type & ResMem)
2357				pEnt->entityProp |= NEED_VGA_MEM;
2358			    else
2359				pEnt->entityProp |= NEED_VGA_IO;
2360			}
2361		    }
2362		pAcc = pAcc->next;
2363	    }
2364	    if (vga)
2365		xf86MsgVerb(X_INFO, 3,"Setting vga for screen %i.\n",i);
2366	    xf86FreeResList(pResVGAHost);
2367	}
2368    }
2369    xf86FreeResList(pResVGA);
2370}
2371
2372/*
2373 * xf86PostProbe() -- Allocate all non conflicting resources
2374 * This function gets called by xf86Init().
2375 */
2376void
2377xf86PostProbe(void)
2378{
2379    memType val;
2380    int i,j;
2381    resPtr resp, acc, tmp, resp_x, *pprev_next;
2382
2383    if (fbSlotClaimed) {
2384        if (pciSlotClaimed || isaSlotClaimed
2385#if (defined(__sparc__) || defined(__sparc)) && !defined(__OpenBSD__)
2386	    || sbusSlotClaimed
2387#endif
2388	    ) {
2389	    FatalError("Cannot run in framebuffer mode. Please specify busIDs "
2390		       "for all framebuffer devices\n");
2391	    return;
2392	} else  {
2393	    xf86Msg(X_INFO,"Running in FRAMEBUFFER Mode\n");
2394	    xf86AccessRestoreState();
2395	    notifyStateChange(NOTIFY_ENABLE);
2396	    doFramebufferMode = TRUE;
2397
2398	    return;
2399	}
2400    }
2401    /* don't compare against ResInit - remove it from clone.*/
2402    acc = tmp = xf86DupResList(Acc);
2403    pprev_next = &acc;
2404    while (tmp) {
2405	if (tmp->res_type & ResInit) {
2406	    (*pprev_next) = tmp->next;
2407	    xfree(tmp);
2408	} else
2409	    pprev_next = &(tmp->next);
2410	tmp = (*pprev_next);
2411    }
2412
2413    for (i=0; i<xf86NumEntities; i++) {
2414	resp = xf86Entities[i]->resources;
2415	xf86Entities[i]->resources = NULL;
2416	resp_x = NULL;
2417	while (resp) {
2418	    if (! (val = checkConflict(&resp->val,acc,i,SETUP,FALSE)))  {
2419 	        resp->res_type &= ~(ResBios); /* just used for chkConflict() */
2420		tmp = resp_x;
2421		resp_x = resp;
2422		resp = resp->next;
2423		resp_x->next = tmp;
2424#ifdef REDUCER
2425	    } else {
2426		resp->res_type |= ResEstimated;
2427 		if (!checkConflict(&resp->val, acc, i, SETUP, FALSE)) {
2428 		    resp->res_type &= ~(ResEstimated | ResBios);
2429 		    tmp = AccReducers;
2430 		    AccReducers = resp;
2431 		    resp = resp->next;
2432 		    AccReducers->next = tmp;
2433#endif
2434		} else {
2435		    xf86MsgVerb(X_INFO, 3, "Found conflict at: 0x%lx\n",val);
2436 		    resp->res_type &= ~ResEstimated;
2437		    tmp = xf86Entities[i]->resources;
2438		    xf86Entities[i]->resources = resp;
2439		    resp = resp->next;
2440		    xf86Entities[i]->resources->next = tmp;
2441		}
2442#ifdef REDUCER
2443	    }
2444#endif
2445	}
2446	xf86JoinResLists(Acc,resp_x);
2447#ifdef REDUCER
2448	ProcessEstimatedConflicts();
2449#endif
2450    }
2451    xf86FreeResList(acc);
2452#if !(defined(__alpha__) && defined(linux)) && \
2453    !(defined(__ia64__) && defined(linux)) && \
2454    !(defined(__sparc64__) && defined(__OpenBSD__))
2455    /*
2456     * No need to validate on Linux/Alpha, Linux/ia64 or OpenBSD/sparc64;
2457     * trust the kernel.
2458     */
2459    ValidatePci();
2460#endif
2461
2462    xf86MsgVerb(X_INFO, 3, "resource ranges after probing:\n");
2463    xf86PrintResList(3, Acc);
2464    checkRoutingForScreens(SETUP);
2465
2466    for (i = 0; i < xf86NumScreens; i++) {
2467	for (j = 0; j<xf86Screens[i]->numEntities; j++) {
2468	    EntityPtr pEnt = xf86Entities[xf86Screens[i]->entityList[j]];
2469 	    if ((pEnt->entityProp & NEED_VGA_ROUTED_SETUP) &&
2470 		((xf86Screens[i]->busAccess = pEnt->busAcc)))
2471		break;
2472	}
2473    }
2474}
2475
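/*
 * checkRequiredResources() -- walk the global Acc list for one entity and
 * derive its entityProp flags: NEED_MEM_SHARED/NEED_IO_SHARED for resources
 * that conflict with anything, NEED_MEM/NEED_IO for resources that remain
 * in use during OPERATING state, and NEED_SHARED when a shared type cannot
 * be controlled separately from the other.  Flags that turn out to be
 * unnecessary are cleared again.
 */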
2476static void
2477checkRequiredResources(int entityIndex)
2478{
2479    resRange range;
2480    resPtr pAcc = Acc;
2481    const EntityPtr pEnt = xf86Entities[entityIndex];
2482    while (pAcc) {
2483	if (pAcc->entityIndex == entityIndex) {
2484	    range = pAcc->val;
2485	    /*  ResAny to find conflicts with anything. */
2486	    range.type = (range.type & ~ResAccMask) | ResAny | ResBios;
2487	    if (checkConflict(&range,Acc,entityIndex,OPERATING,FALSE))
2488		switch (pAcc->res_type & ResPhysMask) {
2489		case ResMem:
2490		    pEnt->entityProp |= NEED_MEM_SHARED;
2491		    break;
2492		case ResIo:
2493		    pEnt->entityProp |= NEED_IO_SHARED;
2494		    break;
2495		}
2496	    if (!(pAcc->res_type & ResOprMask)) {
2497		switch (pAcc->res_type & ResPhysMask) {
2498		case ResMem:
2499		    pEnt->entityProp |= NEED_MEM;
2500		    break;
2501		case ResIo:
2502		    pEnt->entityProp |= NEED_IO;
2503		    break;
2504		}
2505	    }
2506	}
2507	pAcc = pAcc->next;
2508    }
2509
2510    /* check if we can separately enable mem/io resources */
2511    /* XXX we still need to find out how to set this yet  */
2512    if ( ((pEnt->entityProp & NO_SEPARATE_MEM_FROM_IO)
2513	  && (pEnt->entityProp & NEED_MEM_SHARED))
2514	 || ((pEnt->entityProp & NO_SEPARATE_IO_FROM_MEM)
2515	     && (pEnt->entityProp & NEED_IO_SHARED)) )
2516	pEnt->entityProp |= NEED_SHARED;
2517    /*
2518     * After we have checked all resources of an entity against any
2519     * other resource we know whether the entity needs this resource type
2520     * (i.e. mem/io) at all. If not we can disable this type completely,
2521     * so there is no need to share it either.
2522     */
2523    if ((pEnt->entityProp & NEED_MEM_SHARED)
2524	&& (!(pEnt->entityProp & NEED_MEM))
2525	&& (!(pEnt->entityProp & NO_SEPARATE_MEM_FROM_IO)))
2526	pEnt->entityProp &= ~(unsigned long)NEED_MEM_SHARED;
2527
2528    if ((pEnt->entityProp & NEED_IO_SHARED)
2529	&& (!(pEnt->entityProp & NEED_IO))
2530	&& (!(pEnt->entityProp & NO_SEPARATE_IO_FROM_MEM)))
2531	pEnt->entityProp &= ~(unsigned long)NEED_IO_SHARED;
2532}
2533
2534void
2535xf86PostPreInit()
2536{
2537    if (doFramebufferMode) return;
2538
2539    if (xf86NumScreens > 1)
2540	needRAC = TRUE;
2541
2542    xf86MsgVerb(X_INFO, 3, "do I need RAC?");
2543
2544    if (needRAC) {
2545	xf86ErrorFVerb(3, "  Yes, I do.\n");
2546    } else {
2547	xf86ErrorFVerb(3, "  No, I don't.\n");
2548    }
2549
2550    xf86MsgVerb(X_INFO, 3, "resource ranges after preInit:\n");
2551    xf86PrintResList(3, Acc);
2552}
2553
2554void
2555xf86PostScreenInit(void)
2556{
2557    int i,j;
2558    ScreenPtr pScreen;
2559    unsigned int flags;
2560    int nummem = 0, numio = 0;
2561
2562    if (doFramebufferMode) {
2563	SetSIGIOForState(OPERATING);
2564	return;
2565    }
2566
2567#ifdef DEBUG
2568    ErrorF("PostScreenInit  generation: %i\n",serverGeneration);
2569#endif
2570    if (serverGeneration == 1) {
2571	checkRoutingForScreens(OPERATING);
2572	for (i=0; i<xf86NumEntities; i++) {
2573	    checkRequiredResources(i);
2574	}
2575
2576	/*
2577	 * after removing NEED_XXX_SHARED from entities that
2578	 * don't need XXX resources at all we might have
2579	 * a single entity left that has NEED_XXX_SHARED set.
2580	 * In this case we can delete that, too.
2581	 */
2582	for (i = 0; i < xf86NumEntities; i++) {
2583	    if (xf86Entities[i]->entityProp & NEED_MEM_SHARED)
2584		nummem++;
2585	    if (xf86Entities[i]->entityProp & NEED_IO_SHARED)
2586		numio++;
2587	}
2588	for (i = 0; i < xf86NumEntities; i++) {
2589	    if (nummem < 2)
2590		xf86Entities[i]->entityProp &= ~NEED_MEM_SHARED;
2591	    if (numio < 2)
2592		xf86Entities[i]->entityProp &= ~NEED_IO_SHARED;
2593	}
2594    }
2595
2596    if (xf86Screens && needRAC) {
2597	int needRACforVga = 0;
2598
2599	for (i = 0; i < xf86NumScreens; i++) {
2600	    for (j = 0; j < xf86Screens[i]->numEntities; j++) {
2601		if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2602		    & NEED_VGA_ROUTED) {
2603		    needRACforVga ++;
2604		    break; /* only count each screen once */
2605		}
2606	    }
2607	}
2608
2609	for (i = 0; i < xf86NumScreens; i++) {
2610	    Bool needRACforMem = FALSE, needRACforIo = FALSE;
2611
2612	    for (j = 0; j < xf86Screens[i]->numEntities; j++) {
2613		if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2614		    & NEED_MEM_SHARED)
2615		    needRACforMem = TRUE;
2616		if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2617		    & NEED_IO_SHARED)
2618		    needRACforIo = TRUE;
2619		/*
2620		 * We may need RAC although we don't share any resources
2621		 * as we need to route VGA to the correct bus. This can
2622		 * only be done simultaneously for MEM and IO.
2623		 */
2624		if (needRACforVga > 1) {
2625		    if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2626			& NEED_VGA_MEM)
2627			needRACforMem = TRUE;
2628		    if (xf86Entities[xf86Screens[i]->entityList[j]]->entityProp
2629			& NEED_VGA_IO)
2630			needRACforIo = TRUE;
2631		}
2632	    }
2633
2634	    pScreen = xf86Screens[i]->pScreen;
2635	    flags = 0;
2636	    if (needRACforMem) {
2637		flags |= xf86Screens[i]->racMemFlags;
2638		xf86ErrorFVerb(3, "Screen %d is using RAC for mem\n", i);
2639	    }
2640	    if (needRACforIo) {
2641		flags |= xf86Screens[i]->racIoFlags;
2642		xf86ErrorFVerb(3, "Screen %d is using RAC for io\n", i);
2643	    }
2644
2645	    xf86RACInit(pScreen,flags);
2646	}
2647    }
2648
2649    xf86EnterServerState(OPERATING);
2650
2651}
2652
2653/*
2654 * Set operations on resource ranges and resource lists
2655 */
2656
2657
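/*
 * decomposeSparse() -- convert a block range [rBegin, rEnd] into an
 * equivalent list of sparse (rBase, rMask) ranges by covering it with
 * maximal, naturally aligned power-of-two sized chunks.
 */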
2658static resPtr
2659decomposeSparse(resRange range)
2660{
2661    resRange new;
2662    resPtr ret = NULL;
2663    memType val = range.rBegin;
2664    int i = 0;
2665
2666    new.type = (range.type & ~ResExtMask) | ResSparse;
2667
2668    while (1) {
2669	if (val & 0x01) {
2670	    new.rBase = (val << i);
2671	    new.rMask = ~((1 << i) - 1);
2672	    ret = xf86AddResToList(ret,&new,-1);
2673	    val ++;
2674	}
2675	i++;
2676	val >>= 1;
2677	if ((((val + 1) << i) - 1) > range.rEnd)
2678	    break;
2679    }
2680    i--;
2681    val <<= 1;
2682
2683    while (1) {
2684	if((((val + 1) << i) - 1)> range.rEnd) {
2685	    if (--i < 0) break;
2686	    val <<= 1;
2687	} else {
2688	    new.rBase = (val << i);
2689	    new.rMask = ~((1 << i) - 1);
2690	    val++;
2691	    ret = xf86AddResToList(ret,&new,-1);
2692	}
2693    }
2694    return ret;
2695}
2696
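/*
 * x_isSubsetOf() -- recursive helper for xf86IsSubsetOf().  Returns TRUE if
 * range is completely covered by list1: block ranges that only partially
 * overlap an entry are split and the pieces are checked against the rest of
 * the list; once list1 is exhausted the block side (range or list2) is
 * decomposed into sparse ranges and the remainder is checked against list2.
 */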
2697static Bool
2698x_isSubsetOf(resRange range, resPtr list1, resPtr list2)
2699{
2700    resRange range1, range2;
2701    memType m1_A_m2;
2702    Bool ret;
2703    resPtr list;
2704
2705    if (list1) {
2706	list = list1;
2707	if ((range.type & ResTypeMask) == (list->res_type & ResTypeMask)) {
2708	    switch (range.type & ResExtMask) {
2709	    case ResBlock:
2710		if ((list->res_type & ResExtMask) == ResBlock) {
2711		    if (range.rBegin >= list->block_begin
2712			&& range.rEnd <= list->block_end)
2713			return TRUE;
2714		    else if (range.rBegin < list->block_begin
2715			     && range.rEnd > list->block_end) {
2716			RANGE(range1, range.rBegin, list->block_begin - 1,
2717			      range.type);
2718			RANGE(range2, list->block_end + 1, range.rEnd,
2719			      range.type);
2720			return (x_isSubsetOf(range1,list->next,list2) &&
2721				x_isSubsetOf(range2,list->next,list2));
2722		    }
2723		    else if (range.rBegin >= list->block_begin
2724			     && range.rBegin <= list->block_end) {
2725			RANGE(range1, list->block_end + 1, range.rEnd,
2726			      range.type);
2727			return (x_isSubsetOf(range1,list->next,list2));
2728		    } else if (range.rEnd >= list->block_begin
2729			       && range.rEnd <= list->block_end) {
2730			RANGE(range1,range.rBegin, list->block_begin - 1,
2731			      range.type);
2732			return (x_isSubsetOf(range1,list->next,list2));
2733		    }
2734		}
2735		break;
2736	    case ResSparse:
2737		if ((list->res_type & ResExtMask) == ResSparse) {
2738		    memType test;
2739		    int i;
2740
2741		    m1_A_m2 = range.rMask & list->sparse_mask;
2742		    if ((range.rBase ^ list->sparse_base) & m1_A_m2)
2743			break;
2744		    /*
2745		     * We use the following system:
2746		     * let 0 ^= mask:1 base:0, 1 ^= mask:1 base:1,
2747		     * X mask:0 ; S: set TSS: test set for subset
2748		     * NTSS: new test set after test
2749		     *    S: 1   0   1   0   X   X   0   1   X
2750		     *  TSS: 1   0   0   1   1   0   X   X   X
2751		     *    T: 0   0   1   1   0   0   0   0   0
2752		     * NTSS: 1   0  0/X  1/X 1   0   1   0   X
2753		     *    R: 0   0   0   0   0   0   1   1   0
2754		     * If R != 0 TSS and S are disjoint
2755		     * If R == 0 TSS is subset of S
2756		     * If R != 0 NTSS contains elements from TSS
2757		     * which are not also members of S.
2758		     * If a T is set, one of the corresponding bits
2759		     * in NTSS must be set to the specified value;
2760		     * all others are X.
2761		     */
2762		    test = list->sparse_mask & ~range.rMask;
2763		    if (test == 0)
2764			return TRUE;
2765		    for (i = 0; i < sizeof(memType); i++) {
2766			if ((test >> i) & 0x1) {
2767			    RANGE(range1, ((range.rBase & list->sparse_base)
2768				  | (range.rBase & ~list->sparse_mask)
2769				  | ((~list->sparse_base & list->sparse_mask)
2770				     & ~range.rMask)) & range1.rMask,
2771				  ((range.rMask | list->sparse_mask) & ~test)
2772				  | (1 << i), range.type);
2773			    return (x_isSubsetOf(range1,list->next,list2));
2774			}
2775		    }
2776		}
2777		break;
2778	    }
2779	}
2780	return (x_isSubsetOf(range,list->next,list2));
2781    } else if (list2) {
2782	resPtr tmpList = NULL;
2783	switch (range.type & ResExtMask) {
2784	case ResBlock:
2785	    list = tmpList = decomposeSparse(range); /* keep head so whole list can be freed */
2786	    while (tmpList) {
2787		if (!x_isSubsetOf(tmpList->val,list2,NULL)) {
2788		    xf86FreeResList(list);
2789		    return FALSE;
2790		}
2791		tmpList = tmpList->next;
2792	    }
2793	    xf86FreeResList(list);
2794	    return TRUE;
2795	    break;
2796	case ResSparse:
2797	    while (list2) {
2798		tmpList = xf86JoinResLists(tmpList,decomposeSparse(list2->val));
2799		list2 = list2->next;
2800	    }
2801	    ret = x_isSubsetOf(range,tmpList,NULL);
2802	    xf86FreeResList(tmpList);
2803	    return ret;
2804	    break;
2805	}
2806    } else
2807	return FALSE;
2808
2809    return FALSE;
2810}
2811
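/*
 * xf86IsSubsetOf() -- check whether range is fully contained in list.  The
 * list is duplicated and split into block and sparse sub-lists which are
 * then handed to x_isSubsetOf() in the order matching the range type.
 */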
2812Bool
2813xf86IsSubsetOf(resRange range, resPtr list)
2814{
2815    resPtr dup = xf86DupResList(list);
2816    resPtr r_sp = NULL, r = NULL, tmp = NULL;
2817    Bool ret = FALSE;
2818
2819    while (dup) {
2820	tmp = dup;
2821	dup = dup->next;
2822	switch (tmp->res_type & ResExtMask) {
2823	case ResBlock:
2824	    tmp->next = r;
2825	    r = tmp;
2826	    break;
2827	case ResSparse:
2828	    tmp->next = r_sp;
2829	    r_sp = tmp;
2830	    break;
2831	}
2832    }
2833
2834    switch (range.type & ResExtMask) {
2835    case ResBlock:
2836	ret = x_isSubsetOf(range,r,r_sp);
2837	break;
2838    case ResSparse:
2839	ret = x_isSubsetOf(range,r_sp,r);
2840	break;
2841    }
2842    xf86FreeResList(r);
2843    xf86FreeResList(r_sp);
2844
2845    return ret;
2846}
2847
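/*
 * findIntersect() -- intersect a single range with every entry of list and
 * return the overlapping pieces as a newly allocated list.
 */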
2848static resPtr
2849findIntersect(resRange Range, resPtr list)
2850{
2851    resRange range;
2852    resPtr new = NULL;
2853
2854    while (list) {
2855	    if ((Range.type & ResTypeMask) == (list->res_type & ResTypeMask)) {
2856		switch (Range.type & ResExtMask) {
2857		case ResBlock:
2858		    switch (list->res_type & ResExtMask) {
2859		    case ResBlock:
2860			if (Range.rBegin >= list->block_begin)
2861			    range.rBegin = Range.rBegin;
2862			else
2863			    range.rBegin = list->block_begin;
2864			if (Range.rEnd <= list->block_end)
2865			    range.rEnd = Range.rEnd;
2866			else
2867			    range.rEnd = list->block_end;
2868			if (range.rEnd > range.rBegin) {
2869			    range.type = Range.type;
2870			    new = xf86AddResToList(new,&range,-1);
2871			}
2872			break;
2873		    case ResSparse:
2874			new = xf86JoinResLists(new,xf86FindIntersectOfLists(new,decomposeSparse(list->val)));
2875			break;
2876		    }
2877		    break;
2878		case ResSparse:
2879		    switch (list->res_type & ResExtMask) {
2880		    case ResSparse:
2881			if (!((~(Range.rBase ^ list->sparse_base)
2882			    & (Range.rMask & list->sparse_mask)))) {
2883			    RANGE(range, (Range.rBase & list->sparse_base)
2884				  | (~Range.rMask & list->sparse_base)
2885				  | (~list->sparse_mask & Range.rBase),
2886				  Range.rMask | list->sparse_mask,
2887				  Range.type);
2888			    new = xf86AddResToList(new,&range,-1);
2889			}
2890			break;
2891		    case ResBlock:
2892			new = xf86JoinResLists(new,xf86FindIntersectOfLists(
2893			    decomposeSparse(range),list));
2894			break;
2895		    }
2896		}
2897	    }
2898	list = list->next;
2899    }
2900    return new;
2901}
2902
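/*
 * xf86FindIntersectOfLists() -- return a new list holding the pairwise
 * intersections of the entries of l1 and l2.
 */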
2903resPtr
2904xf86FindIntersectOfLists(resPtr l1, resPtr l2)
2905{
2906    resPtr ret = NULL;
2907
2908    while (l1) {
2909	ret = xf86JoinResLists(ret,findIntersect(l1->val,l2));
2910	l1 = l1->next;
2911    }
2912    return ret;
2913}
2914
2915#if 0	/* Not used */
2916static resPtr
2917xf86FindComplement(resRange Range)
2918{
2919    resRange range;
2920    memType tmp;
2921    resPtr new = NULL;
2922    int i;
2923
2924    switch (Range.type & ResExtMask) {
2925    case ResBlock:
2926	if (Range.rBegin > 0) {
2927	    RANGE(range, 0, Range.rBegin - 1, Range.type);
2928	    new = xf86AddResToList(new,&range,-1);
2929	}
2930	if (Range.rEnd < (memType)~0) {
2931	    RANGE(range,Range.rEnd + 1, (memType)~0, Range.type);
2932	    new = xf86AddResToList(new,&range,-1);
2933	}
2934	break;
2935    case ResSparse:
2936	tmp = Range.rMask;
2937	for (i = 0; i < sizeof(memType); i++) {
2938	    if (tmp & 0x1) {
2939		RANGE(range,(~Range.rMask & range.rMask),(1 << i), Range.type);
2940		new = xf86AddResToList(new,&range,-1);
2941	    }
2942	}
2943	break;
2944    default:
2945	break;
2946    }
2947    return new;
2948}
2949#endif
2950
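/*
 * xf86ExtractTypeFromList() -- return a new list containing copies of all
 * entries of list whose ResTypeMask bits match type.
 */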
2951resPtr
2952xf86ExtractTypeFromList(resPtr list, unsigned long type)
2953{
2954    resPtr ret = NULL;
2955
2956    while (list) {
2957	if ((list->res_type & ResTypeMask) == type)
2958	    ret = xf86AddResToList(ret,&(list->val),list->entityIndex);
2959	list = list->next;
2960    }
2961    return ret;
2962}
2963
2964/*------------------------------------------------------------*/
2965static void CheckGenericGA(void);
2966
2967/*
2968 * xf86FindPrimaryDevice() - Find the display device which
2969 * was active when the server was started.
2970 */
2971void
2972xf86FindPrimaryDevice()
2973{
2974    /* if no VGA device is found check for primary PCI device */
2975    if (primaryBus.type == BUS_NONE && xorgHWAccess)
2976        CheckGenericGA();
2977    if (primaryBus.type != BUS_NONE) {
2978	char *bus;
2979	char *loc = xnfcalloc(1,9);
2980	if (loc == NULL) return;
2981
2982	switch (primaryBus.type) {
2983	case BUS_PCI:
2984	    bus = "PCI";
2985	    sprintf(loc," %2.2x:%2.2x:%1.1x",primaryBus.id.pci.bus,
2986	    primaryBus.id.pci.device,primaryBus.id.pci.func);
2987	    break;
2988	case BUS_ISA:
2989	    bus = "ISA";
2990	    loc[0] = '\0';
2991	    break;
2992	case BUS_SBUS:
2993	    bus = "SBUS";
2994	    sprintf(loc," %2.2x",primaryBus.id.sbus.fbNum);
2995	    break;
2996	default:
2997	    bus = "";
2998	    loc[0] = '\0';
2999	}
3000
3001	xf86MsgVerb(X_INFO, 2, "Primary Device is: %s%s\n",bus,loc);
3002	xfree(loc);
3003    }
3004
3005}
3006
3007#if !defined(__sparc) && !defined(__sparc__) && !defined(__powerpc__) && !defined(__mips__) && !defined(__arm__)
3008#include "vgaHW.h"
3009#include "compiler.h"
3010#endif
3011
3012/*
3013 * CheckGenericGA() - Check for presence of a VGA device.
3014 */
3015static void
3016CheckGenericGA()
3017{
3018/* This needs to be changed for multiple domains */
3019#if !defined(__sparc__) && !defined(__sparc) && !defined(__powerpc__) && !defined(__mips__) && !defined(__ia64__) && !defined(__arm__) && !defined(__s390__)
3020    IOADDRESS GenericIOBase = VGAHW_GET_IOBASE();
3021    CARD8 CurrentValue, TestValue;
3022
3023    /* VGA CRTC registers are not used here, so don't bother unlocking them */
3024
3025    /* VGA has one more read/write attribute register than EGA */
3026    (void) inb(GenericIOBase + VGA_IN_STAT_1_OFFSET);  /* Reset flip-flop */
3027    outb(VGA_ATTR_INDEX, 0x14 | 0x20);
3028    CurrentValue = inb(VGA_ATTR_DATA_R);
3029    outb(VGA_ATTR_DATA_W, CurrentValue ^ 0x0F);
3030    outb(VGA_ATTR_INDEX, 0x14 | 0x20);
3031    TestValue = inb(VGA_ATTR_DATA_R);
3032    outb(VGA_ATTR_DATA_W, CurrentValue);
3033
3034    if ((CurrentValue ^ 0x0F) == TestValue) {
3035	primaryBus.type = BUS_ISA;
3036    }
3037#endif
3038}
3039
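/*
 * xf86NoSharedResources() -- return FALSE if any entity of the given screen
 * has the NEED_*_SHARED property matching res, TRUE otherwise (including
 * for out-of-range screen indices).
 */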
3040_X_EXPORT Bool
3041xf86NoSharedResources(int screenIndex,resType res)
3042{
3043    int j;
3044
3045    if (screenIndex >= xf86NumScreens)
3046	return TRUE;
3047
3048    for (j = 0; j < xf86Screens[screenIndex]->numEntities; j++) {
3049      switch (res) {
3050      case IO:
3051	if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3052	     & NEED_IO_SHARED)
3053	  return FALSE;
3054	break;
3055      case MEM:
3056	if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3057	     & NEED_MEM_SHARED)
3058	  return FALSE;
3059	break;
3060      case MEM_IO:
3061	if ( xf86Entities[xf86Screens[screenIndex]->entityList[j]]->entityProp
3062	     & NEED_SHARED)
3063	  return FALSE;
3064	break;
3065      case NONE:
3066	break;
3067      }
3068    }
3069    return TRUE;
3070}
3071
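/*
 * xf86RegisterStateChangeNotificationCallback() -- register a callback that
 * notifyStateChange() will invoke with the new xf86NotifyState on every
 * server state transition; arg is passed back to the callback unchanged.
 */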
3072_X_EXPORT void
3073xf86RegisterStateChangeNotificationCallback(xf86StateChangeNotificationCallbackFunc func, pointer arg)
3074{
3075    StateChangeNotificationPtr ptr =
3076	(StateChangeNotificationPtr)xnfalloc(sizeof(StateChangeNotificationRec));
3077
3078    ptr->func = func;
3079    ptr->arg = arg;
3080    ptr->next = StateChangeNotificationList;
3081    StateChangeNotificationList = ptr;
3082}
3083
3084_X_EXPORT Bool
3085xf86DeregisterStateChangeNotificationCallback(xf86StateChangeNotificationCallbackFunc func)
3086{
3087    StateChangeNotificationPtr *ptr = &StateChangeNotificationList;
3088    StateChangeNotificationPtr tmp;
3089
3090    while (*ptr) {
3091	if ((*ptr)->func == func) {
3092	    tmp = (*ptr);
3093	    (*ptr) = (*ptr)->next;
3094	    xfree(tmp);
3095	    return TRUE;
3096	}
3097	ptr = &((*ptr)->next);
3098    }
3099    return FALSE;
3100}
3101
3102static void
3103notifyStateChange(xf86NotifyState state)
3104{
3105    StateChangeNotificationPtr ptr = StateChangeNotificationList;
3106    while (ptr) {
3107	ptr->func(state,ptr->arg);
3108	ptr = ptr->next;
3109    }
3110}
3111
3112/* Multihead accel sharing accessor functions and entity Private handling */
3113
3114_X_EXPORT int
3115xf86GetLastScrnFlag(int entityIndex)
3116{
3117    if(entityIndex < xf86NumEntities) {
3118        return(xf86Entities[entityIndex]->lastScrnFlag);
3119    } else {
3120        return -1;
3121    }
3122}
3123
3124_X_EXPORT void
3125xf86SetLastScrnFlag(int entityIndex, int scrnIndex)
3126{
3127    if(entityIndex < xf86NumEntities) {
3128        xf86Entities[entityIndex]->lastScrnFlag = scrnIndex;
3129    }
3130}
3131
3132_X_EXPORT Bool
3133xf86IsEntityShared(int entityIndex)
3134{
3135    if(entityIndex < xf86NumEntities) {
3136        if(xf86Entities[entityIndex]->entityProp & IS_SHARED_ACCEL) {
3137	    return TRUE;
3138	}
3139    }
3140    return FALSE;
3141}
3142
3143_X_EXPORT void
3144xf86SetEntityShared(int entityIndex)
3145{
3146    if(entityIndex < xf86NumEntities) {
3147        xf86Entities[entityIndex]->entityProp |= IS_SHARED_ACCEL;
3148    }
3149}
3150
3151_X_EXPORT Bool
3152xf86IsEntitySharable(int entityIndex)
3153{
3154    if(entityIndex < xf86NumEntities) {
3155        if(xf86Entities[entityIndex]->entityProp & ACCEL_IS_SHARABLE) {
3156	    return TRUE;
3157	}
3158    }
3159    return FALSE;
3160}
3161
3162_X_EXPORT void
3163xf86SetEntitySharable(int entityIndex)
3164{
3165    if(entityIndex < xf86NumEntities) {
3166        xf86Entities[entityIndex]->entityProp |= ACCEL_IS_SHARABLE;
3167    }
3168}
3169
3170_X_EXPORT Bool
3171xf86IsPrimInitDone(int entityIndex)
3172{
3173    if(entityIndex < xf86NumEntities) {
3174        if(xf86Entities[entityIndex]->entityProp & SA_PRIM_INIT_DONE) {
3175	    return TRUE;
3176	}
3177    }
3178    return FALSE;
3179}
3180
3181_X_EXPORT void
3182xf86SetPrimInitDone(int entityIndex)
3183{
3184    if(entityIndex < xf86NumEntities) {
3185        xf86Entities[entityIndex]->entityProp |= SA_PRIM_INIT_DONE;
3186    }
3187}
3188
3189_X_EXPORT void
3190xf86ClearPrimInitDone(int entityIndex)
3191{
3192    if(entityIndex < xf86NumEntities) {
3193        xf86Entities[entityIndex]->entityProp &= ~SA_PRIM_INIT_DONE;
3194    }
3195}
3196
3197
3198/*
3199 * Allocate a private in the entities.
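 *
 * Rough usage sketch (names are illustrative only): a module allocates an
 * index once and then attaches its per-entity data through it:
 *
 *	privIndex = xf86AllocateEntityPrivateIndex();
 *	xf86GetEntityPrivate(entityIndex, privIndex)->ptr = pPriv;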
3200 */
3201
3202_X_EXPORT int
3203xf86AllocateEntityPrivateIndex(void)
3204{
3205    int idx, i;
3206    EntityPtr pEnt;
3207    DevUnion *nprivs;
3208
3209    idx = xf86EntityPrivateCount++;
3210    for (i = 0; i < xf86NumEntities; i++) {
3211	pEnt = xf86Entities[i];
3212	nprivs = xnfrealloc(pEnt->entityPrivates,
3213			    xf86EntityPrivateCount * sizeof(DevUnion));
3214	/* Zero the new private */
3215	bzero(&nprivs[idx], sizeof(DevUnion));
3216	pEnt->entityPrivates = nprivs;
3217    }
3218    return idx;
3219}
3220
3221_X_EXPORT DevUnion *
3222xf86GetEntityPrivate(int entityIndex, int privIndex)
3223{
3224    if (entityIndex >= xf86NumEntities || privIndex >= xf86EntityPrivateCount)
3225	return NULL;
3226
3227    return &(xf86Entities[entityIndex]->entityPrivates[privIndex]);
3228}
3229
3230