Coherent4.2.10/include/sys/ksynch.h

/* (-lgl
 *	Coherent 386 release 4.2
 *	Copyright (c) 1982, 1993 by Mark Williams Company.
 *	All rights reserved. May not be copied without permission.
 *	For copying permission and licensing info, write licensing@mwc.com
 -lgl) */

#ifndef	__SYS_KSYNCH_H__
#define	__SYS_KSYNCH_H__

/*
 * Support for basic locks.
 */

#include <common/ccompat.h>

#if	_DDI_DKI

#include <kernel/__bool.h>
#include <kernel/__pl.h>
#include <kernel/_lock.h>
#include <kernel/_sv.h>
#include <kernel/x86lock.h>

/*
 * We can provide a greater level of error checking/reporting if we know we
 * are running on a uniprocessor. Use this feature-test macro to control
 * compilation of code for uniprocessors.
 *
 * Note that many of the traditional simplifying assumptions about
 * uniprocessor systems may not apply if basic-lock acquisition can schedule
 * interrupt contexts (i.e., if the system is designed such that interrupts
 * need not be turned off). Such a system would behave effectively like a
 * multiprocessor, although most of the extra checking could still be valid
 * with information gained from the interrupt-handler scheduling system.
 */

#define	_UNIPROCESSOR	1


/*
 * For greater scalability, a "ticket lock" should be preferred over a basic
 * test-and-set or test-and-test-and-set spin lock, since it guarantees
 * FIFO ordering and reduces the level of memory contention (important for
 * multiprocessor shared-memory cache performance). See the paper cited at the
 * top of this file for more details.
 *
 * However, ticket locks take slightly more memory and are slightly slower
 * than basic test-and-set spin locks, so we only use them in situations
 * where we expect them to be needed.
 */

#if	0	/* _UNIPROCESSOR */
#define	_TICKET_LOCK		1
#endif


/*
 * Even if ticket locks are used, in the absence of a fetch-and-increment
 * atomic operation a basic test-and-set style lock must be used to control
 * the "ticket gate", so the style of test-and-set lock used will always have
 * consequences. A simple test-and-set spin lock reduces delay, while a
 * test-and-test-and-set spin lock reduces contention.
 */

#define	__TEST_AND_TEST__	1
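
/*
 * A minimal sketch of the two spinning styles (illustrative only; the
 * primitive name "atomic_test_and_set" is an assumption standing in for
 * whatever <kernel/x86lock.h> actually provides):
 *
 *	Test-and-set: every retry issues the atomic read-modify-write,
 *	generating bus traffic on each iteration.
 *
 *		while (atomic_test_and_set (lockp))
 *			;
 *
 *	Test-and-test-and-set: retries spin on an ordinary cached read and
 *	reissue the atomic operation only once the lock appears free.
 *
 *		while (atomic_test_and_set (lockp))
 *			while (*lockp != 0)
 *				;
 */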


/*
 * Information structure used for holding debugging information about
 * lock structures.
 */

struct lock_info {
	__CONST__ char * lk_name;	/* name for statistics gathering */
	int		lk_flags;	/* must be zero */
	int		lk_pad;
};

typedef	struct lock_info lkinfo_t;


/*
 * No lock statistics structure is currently defined, but we reserve the name.
 *
 * For the other lock structures, we publish only their names, and leave the
 * types incomplete.
 */

typedef	__VOID__		lkstat_t;
typedef	__lock_t		lock_t;
typedef	__sv_t			sv_t;
typedef struct sleep_lock	sleep_t;
typedef struct readwrite_lock	rwlock_t;


__EXTERN_C_BEGIN__

/* Basic lock functions */

__pl_t		LOCK 		__PROTO ((lock_t * _lockp, __pl_t _pl));
lock_t	      *	LOCK_ALLOC	__PROTO ((__lkhier_t _hierarchy,
					  __pl_t _min_pl, lkinfo_t * _lkinfop,
					  int _flag));
void		LOCK_DEALLOC	__PROTO ((lock_t * _lockp));
__pl_t		TRYLOCK		__PROTO ((lock_t * _lockp, __pl_t _pl));
void		UNLOCK		__PROTO ((lock_t * _lockp, __pl_t _pl));
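
/*
 * Typical basic-lock usage (a minimal sketch; the hierarchy value "1", the
 * priority level "plhi" and the allocation flag "KM_NOSLEEP" follow the
 * usual DDI/DKI conventions and are defined outside this header):
 *
 *	static lkinfo_t	my_lkinfo = { "mylock", 0, 0 };
 *	lock_t	      *	lkp;
 *	__pl_t		pl;
 *
 *	lkp = LOCK_ALLOC (1, plhi, & my_lkinfo, KM_NOSLEEP);
 *	pl = LOCK (lkp, plhi);
 *	... short, non-blocking critical section ...
 *	UNLOCK (lkp, pl);
 *	LOCK_DEALLOC (lkp);
 */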


/* Synchronization variable functions */

sv_t	      *	SV_ALLOC	__PROTO ((int _flag));
void		SV_BROADCAST	__PROTO ((sv_t * _svp, int _flags));
void		SV_DEALLOC	__PROTO ((sv_t * _svp));
void		SV_SIGNAL	__PROTO ((sv_t * _svp, int _flags));
void		SV_WAIT		__PROTO ((sv_t * _svp, int _priority,
					  lock_t * _lkp));
__bool_t	SV_WAIT_SIG	__PROTO ((sv_t * _svp, int _priority,
					  lock_t * _lkp));
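
/*
 * Typical condition-wait pattern (a minimal sketch; "condition" and the
 * wakeup priority "pri" stand for caller-supplied values). SV_WAIT
 * atomically releases the basic lock and sleeps, so the lock must be
 * re-acquired before the condition is tested again:
 *
 *	pl = LOCK (lkp, plhi);
 *	while (! condition) {
 *		SV_WAIT (svp, pri, lkp);
 *		pl = LOCK (lkp, plhi);
 *	}
 *	... consume the protected state ...
 *	UNLOCK (lkp, pl);
 *
 * A waker holds the same lock while changing the state, then calls
 * SV_SIGNAL (svp, 0) or SV_BROADCAST (svp, 0).
 */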


/* Sleep locks */

sleep_t	      *	SLEEP_ALLOC	__PROTO ((int _arg, lkinfo_t * _lkinfop,
					  int _flag));
void		SLEEP_DEALLOC	__PROTO ((sleep_t * _lockp));
void		SLEEP_LOCK	__PROTO ((sleep_t * _lockp, int _priority));
__bool_t	SLEEP_LOCKAVAIL	__PROTO ((sleep_t * _lockp));
__bool_t	SLEEP_LOCKOWNED	__PROTO ((sleep_t * _lockp));
__bool_t	SLEEP_LOCK_SIG	__PROTO ((sleep_t * _lockp, int _priority));
__bool_t	SLEEP_TRYLOCK	__PROTO ((sleep_t * _lockp));
void		SLEEP_UNLOCK	__PROTO ((sleep_t * _lockp));
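
/*
 * Typical sleep-lock usage (a minimal sketch; the allocation flag and the
 * priority "pri" are defined outside this header). Unlike basic locks,
 * sleep locks may be held across operations that block:
 *
 *	sleep_t	      *	slp;
 *
 *	slp = SLEEP_ALLOC (0, & my_lkinfo, KM_NOSLEEP);
 *	SLEEP_LOCK (slp, pri);
 *	... critical section that may block ...
 *	SLEEP_UNLOCK (slp);
 *	SLEEP_DEALLOC (slp);
 *
 * SLEEP_LOCK_SIG behaves like SLEEP_LOCK but returns false, without
 * acquiring the lock, if the sleep is interrupted by a signal.
 */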


/* Read/write locks */

rwlock_t      *	RW_ALLOC	__PROTO ((__lkhier_t _hierarchy,
					  __pl_t _min_pl, lkinfo_t * _lkinfop,
					  int _flag));
void		RW_DEALLOC	__PROTO ((rwlock_t * _lockp));
__pl_t		RW_RDLOCK	__PROTO ((rwlock_t * _lockp, __pl_t _pl));
__pl_t		RW_TRYRDLOCK	__PROTO ((rwlock_t * _lockp, __pl_t _pl));
__pl_t		RW_TRYWRLOCK	__PROTO ((rwlock_t * _lockp, __pl_t _pl));
void		RW_UNLOCK	__PROTO ((rwlock_t * _lockp, __pl_t _pl));
__pl_t		RW_WRLOCK	__PROTO ((rwlock_t * _lockp, __pl_t _pl));
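
/*
 * Typical read/write-lock usage (a minimal sketch; the hierarchy value,
 * "plhi" and "KM_NOSLEEP" are conventions defined outside this header).
 * Multiple readers may hold the lock at once; a writer is exclusive:
 *
 *	rwlock_t      *	rwp;
 *	__pl_t		pl;
 *
 *	rwp = RW_ALLOC (1, plhi, & my_lkinfo, KM_NOSLEEP);
 *
 *	pl = RW_RDLOCK (rwp, plhi);
 *	... read-only critical section ...
 *	RW_UNLOCK (rwp, pl);
 *
 *	pl = RW_WRLOCK (rwp, plhi);
 *	... updating critical section ...
 *	RW_UNLOCK (rwp, pl);
 */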

/* For internal self-checking */

void		LOCK_DISPATCH	__PROTO ((void));

__EXTERN_C_END__

#endif	/* _DDI_DKI */

/*
 * This type is used in the internal COHERENT source as a form of sleep lock.
 * It is used in structures whose layout cannot change, because the
 * configuration mechanism for COHERENT in some cases involves hot-patching
 * the kernel.
 */

typedef	struct {
	unsigned char	_lock [2];
	unsigned short	_flags;
	char	      *	_lastlock;
} __DUMB_GATE [1], * __DUMB_GATE_P;

/*
 * Flags:
 *	__GATE_COUNT causes this gate to feed into a per-process count of
 *	locks held. When a system call returns to user level, this count is
 *	required to be zero.
 */

#define	__GATE_COUNT	1

#define	__GATE_FLAGS(g)		((g)->_flags)

#if	TRACER

#define	__GATE_DECLARE(name)	{ { { 0, 0 }, 0, NULL, NULL } }

#define	__GATE_INIT(g, name, flags) \
		((g)->_lock [1] = (g)->_lock [0] = 0, (g)->_flags = (flags))
#define	__GATE_LOCKED(g)	((g)->_lock [0])
#define	__GATE_LOCK_COUNT(g)	((g)->_count)
#define	__GATE_LOCK_OWNER(g)	((g)->_lastlock)

#else	/* if ! defined (TRACER) */

#define	__GATE_DECLARE(name)	{ { { 0, 0 } } }
#define	__GATE_INIT(g, name, flags) \
		((g)->_lock [1] = (g)->_lock [0] = 0, (g)->_flags = (flags))
#define	__GATE_LOCKED(g)	((g)->_lock [0])

#endif	/* ! defined (TRACER) */

__EXTERN_C_BEGIN__

void		__GATE_LOCK	__PROTO ((__DUMB_GATE_P g,
					  __CONST__ char * reason));
void		__GATE_UNLOCK	__PROTO ((__DUMB_GATE_P g));
void		__GATE_TO_INTERRUPT
				__PROTO ((__DUMB_GATE_P g));

__EXTERN_C_END__
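
/*
 * Typical gate usage (a minimal sketch based only on the macros and
 * prototypes above; the names and the reason string are illustrative):
 *
 *	static __DUMB_GATE	my_gate = __GATE_DECLARE (my_gate);
 *
 *	__GATE_INIT (my_gate, "my_gate", __GATE_COUNT);
 *	__GATE_LOCK (my_gate, "updating my table");
 *	... critical section ...
 *	__GATE_UNLOCK (my_gate);
 */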

#endif	/* ! defined (__SYS_KSYNCH_H__) */