[m-rev.] diff: simplify runtime/mercury_context.[ch]
Fergus Henderson
fjh at cs.mu.OZ.AU
Tue Jun 4 18:57:32 AEST 2002
Estimated hours taken: 0.5
runtime/mercury_context.h:
runtime/mercury_context.c:
	Avoid unnecessary dynamic memory allocation: make MR_runqueue_lock,
	MR_runqueue_cond, MR_pending_contexts_lock and free_context_list_lock
	ordinary global lock/condition objects, rather than pointers to
	objects heap-allocated with MR_GC_NEW.
Workspace: /home/ceres/fjh/mercury
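The whole diff follows one pattern: each lock (and the one condition
variable) used to be declared as a pointer, heap-allocated with MR_GC_NEW
and then initialised; it is now the object itself, initialised in place, so
no allocation is needed and every MR_LOCK/MR_UNLOCK avoids one pointer
indirection. A minimal standalone sketch of the before/after pattern, using
hypothetical names (old_style_lock, new_style_lock, init_old_style,
init_new_style) and a NULL attribute argument in place of MR_MUTEX_ATTR,
not the actual Mercury globals:

    #include <stdlib.h>
    #include <pthread.h>

    /* Before: the lock is a pointer, so it must be heap-allocated
     * (MR_GC_NEW in the real code) before it can be initialised, and
     * every lock/unlock goes through an extra pointer dereference. */
    static pthread_mutex_t *old_style_lock;

    static void
    init_old_style(void)
    {
        old_style_lock = malloc(sizeof(*old_style_lock));
        /* error handling omitted for brevity */
        pthread_mutex_init(old_style_lock, NULL);
    }

    /* After: the mutex itself is the global; it only needs to be
     * initialised in place, with no allocation at all. */
    static pthread_mutex_t new_style_lock;

    static void
    init_new_style(void)
    {
        pthread_mutex_init(&new_style_lock, NULL);
    }

    int
    main(void)
    {
        init_old_style();
        init_new_style();

        pthread_mutex_lock(&new_style_lock);
        pthread_mutex_unlock(&new_style_lock);

        free(old_style_lock);
        return 0;
    }

Since these locks live for the whole lifetime of the runtime, nothing is
lost by giving them static storage, and the address-of operator at each
use site is the only change callers see.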
Index: runtime/mercury_context.c
===================================================================
RCS file: /home/mercury1/repository/mercury/runtime/mercury_context.c,v
retrieving revision 1.32
diff -u -d -r1.32 mercury_context.c
--- runtime/mercury_context.c 18 Feb 2002 07:01:14 -0000 1.32
+++ runtime/mercury_context.c 4 Jun 2002 08:47:26 -0000
@@ -29,13 +29,13 @@
MR_Context *MR_runqueue_head;
MR_Context *MR_runqueue_tail;
#ifdef MR_THREAD_SAFE
- MercuryLock *MR_runqueue_lock;
- MercuryCond *MR_runqueue_cond;
+ MercuryLock MR_runqueue_lock;
+ MercuryCond MR_runqueue_cond;
#endif
MR_PendingContext *MR_pending_contexts;
#ifdef MR_THREAD_SAFE
- MercuryLock *MR_pending_contexts_lock;
+ MercuryLock MR_pending_contexts_lock;
#endif
/*
@@ -47,7 +47,7 @@
*/
static MR_Context *free_context_list = NULL;
#ifdef MR_THREAD_SAFE
- static MercuryLock *free_context_list_lock;
+ static MercuryLock free_context_list_lock;
#endif
void
@@ -55,19 +55,15 @@
{
#ifdef MR_THREAD_SAFE
- MR_runqueue_lock = MR_GC_NEW(MercuryLock);
- pthread_mutex_init(MR_runqueue_lock, MR_MUTEX_ATTR);
+ pthread_mutex_init(&MR_runqueue_lock, MR_MUTEX_ATTR);
- MR_runqueue_cond = MR_GC_NEW(MercuryCond);
- pthread_cond_init(MR_runqueue_cond, MR_COND_ATTR);
+ pthread_cond_init(&MR_runqueue_cond, MR_COND_ATTR);
- free_context_list_lock = MR_GC_NEW(MercuryLock);
- pthread_mutex_init(free_context_list_lock, MR_MUTEX_ATTR);
+ pthread_mutex_init(&free_context_list_lock, MR_MUTEX_ATTR);
pthread_mutex_init(&MR_global_lock, MR_MUTEX_ATTR);
- MR_pending_contexts_lock = MR_GC_NEW(MercuryLock);
- pthread_mutex_init(MR_pending_contexts_lock, MR_MUTEX_ATTR);
+ pthread_mutex_init(&MR_pending_contexts_lock, MR_MUTEX_ATTR);
MR_KEY_CREATE(&MR_engine_base_key, NULL);
@@ -78,9 +74,9 @@
MR_finalize_runqueue(void)
{
#ifdef MR_THREAD_SAFE
- pthread_mutex_destroy(MR_runqueue_lock);
- pthread_cond_destroy(MR_runqueue_cond);
- pthread_mutex_destroy(free_context_list_lock);
+ pthread_mutex_destroy(&MR_runqueue_lock);
+ pthread_cond_destroy(&MR_runqueue_cond);
+ pthread_mutex_destroy(&free_context_list_lock);
#endif
}
@@ -170,9 +166,9 @@
{
MR_Context *c;
- MR_LOCK(free_context_list_lock, "create_context");
+ MR_LOCK(&free_context_list_lock, "create_context");
if (free_context_list == NULL) {
- MR_UNLOCK(free_context_list_lock, "create_context i");
+ MR_UNLOCK(&free_context_list_lock, "create_context i");
c = MR_GC_NEW(MR_Context);
#ifndef MR_HIGHLEVEL_CODE
c->MR_ctxt_detstack_zone = NULL;
@@ -184,7 +180,7 @@
} else {
c = free_context_list;
free_context_list = c->MR_ctxt_next;
- MR_UNLOCK(free_context_list_lock, "create_context ii");
+ MR_UNLOCK(&free_context_list_lock, "create_context ii");
}
MR_init_context(c);
@@ -195,10 +191,10 @@
void
MR_destroy_context(MR_Context *c)
{
- MR_LOCK(free_context_list_lock, "destroy_context");
+ MR_LOCK(&free_context_list_lock, "destroy_context");
c->MR_ctxt_next = free_context_list;
free_context_list = c;
- MR_UNLOCK(free_context_list_lock, "destroy_context");
+ MR_UNLOCK(&free_context_list_lock, "destroy_context");
}
void
@@ -295,7 +291,7 @@
MR_schedule(MR_Context *ctxt)
{
ctxt->MR_ctxt_next = NULL;
- MR_LOCK(MR_runqueue_lock, "schedule");
+ MR_LOCK(&MR_runqueue_lock, "schedule");
if (MR_runqueue_tail) {
MR_runqueue_tail->MR_ctxt_next = ctxt;
MR_runqueue_tail = ctxt;
@@ -303,8 +299,8 @@
MR_runqueue_head = ctxt;
MR_runqueue_tail = ctxt;
}
- MR_SIGNAL(MR_runqueue_cond);
- MR_UNLOCK(MR_runqueue_lock, "schedule");
+ MR_SIGNAL(&MR_runqueue_cond);
+ MR_UNLOCK(&MR_runqueue_lock, "schedule");
}
#ifndef MR_HIGHLEVEL_CODE
@@ -325,11 +321,11 @@
depth = MR_ENGINE(MR_eng_c_depth);
thd = MR_ENGINE(MR_eng_owner_thread);
- MR_LOCK(MR_runqueue_lock, "MR_do_runnext (i)");
+ MR_LOCK(&MR_runqueue_lock, "MR_do_runnext (i)");
while (1) {
if (MR_exit_now == MR_TRUE) {
- MR_UNLOCK(MR_runqueue_lock, "MR_do_runnext (ii)");
+ MR_UNLOCK(&MR_runqueue_lock, "MR_do_runnext (ii)");
MR_destroy_thread(MR_cur_engine());
}
tmp = MR_runqueue_head;
@@ -348,7 +344,7 @@
if (tmp != NULL) {
break;
}
- MR_WAIT(MR_runqueue_cond, MR_runqueue_lock);
+ MR_WAIT(&MR_runqueue_cond, &MR_runqueue_lock);
}
MR_ENGINE(MR_eng_this_context) = tmp;
if (prev != NULL) {
@@ -359,7 +355,7 @@
if (MR_runqueue_tail == tmp) {
MR_runqueue_tail = prev;
}
- MR_UNLOCK(MR_runqueue_lock, "MR_do_runnext (iii)");
+ MR_UNLOCK(&MR_runqueue_lock, "MR_do_runnext (iii)");
MR_load_context(MR_ENGINE(MR_eng_this_context));
MR_GOTO(MR_ENGINE(MR_eng_this_context)->MR_ctxt_resume);
}
Index: runtime/mercury_context.h
===================================================================
RCS file: /home/mercury1/repository/mercury/runtime/mercury_context.h,v
retrieving revision 1.20
diff -u -d -r1.20 mercury_context.h
--- runtime/mercury_context.h 13 Feb 2002 09:56:38 -0000 1.20
+++ runtime/mercury_context.h 4 Jun 2002 08:48:27 -0000
@@ -142,8 +142,8 @@
extern MR_Context *MR_runqueue_head;
extern MR_Context *MR_runqueue_tail;
#ifdef MR_THREAD_SAFE
- extern MercuryLock *MR_runqueue_lock;
- extern MercuryCond *MR_runqueue_cond;
+ extern MercuryLock MR_runqueue_lock;
+ extern MercuryCond MR_runqueue_cond;
#endif
/*
@@ -178,7 +178,7 @@
extern MR_PendingContext *MR_pending_contexts;
#ifdef MR_THREAD_SAFE
- extern MercuryLock *MR_pending_contexts_lock;
+ extern MercuryLock MR_pending_contexts_lock;
#endif
/*
--
Fergus Henderson <fjh at cs.mu.oz.au> | "I have always known that the pursuit
The University of Melbourne | of excellence is a lethal habit"
WWW: <http://www.cs.mu.oz.au/~fjh> | -- the last words of T. S. Garp.