[m-rev.] diff: runtime style fixes
Zoltan Somogyi
zs at csse.unimelb.edu.au
Mon Nov 1 16:00:48 AEDT 2010
runtime/mercury_context.c:
runtime/mercury_wrapper.c:
Minor style fixes.
Zoltan.
cvs diff: Diffing .
Index: mercury_context.c
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/runtime/mercury_context.c,v
retrieving revision 1.82
diff -u -b -r1.82 mercury_context.c
--- mercury_context.c 7 Oct 2010 23:38:44 -0000 1.82
+++ mercury_context.c 1 Nov 2010 04:56:16 -0000
@@ -85,7 +85,7 @@
#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
MR_bool MR_profile_parallel_execution = MR_FALSE;
-#ifndef MR_HIGHLEVEL_CODE
+ #ifndef MR_HIGHLEVEL_CODE
static MR_Stats MR_profile_parallel_executed_global_sparks =
{ 0, 0, 0, 0 };
static MR_Stats MR_profile_parallel_executed_contexts = { 0, 0, 0, 0 };
@@ -97,13 +97,13 @@
/*
** We don't access these atomically. They are protected by the free context
-** list lock
+** list lock.
*/
static MR_Integer MR_profile_parallel_small_context_reused = 0;
static MR_Integer MR_profile_parallel_regular_context_reused = 0;
static MR_Integer MR_profile_parallel_small_context_kept = 0;
static MR_Integer MR_profile_parallel_regular_context_kept = 0;
-#endif /* ! MR_HIGHLEVEL_CODE */
+ #endif /* ! MR_HIGHLEVEL_CODE */
#endif /* MR_PROFILE_PARALLEL_EXECUTION_SUPPORT */
/*
@@ -118,7 +118,8 @@
#endif
#endif
-#if defined(MR_LL_PARALLEL_CONJ) && defined(MR_PROFILE_PARALLEL_EXECUTION_SUPPORT)
+#if defined(MR_LL_PARALLEL_CONJ) && \
+ defined(MR_PROFILE_PARALLEL_EXECUTION_SUPPORT)
/*
** This is used to give each context its own unique ID. It is accessed with
** atomic operations.
@@ -216,13 +217,12 @@
#endif
/*
- * Configure MR_num_threads if unset to match number of processors on the
- * system, If we do this then we prepare to set processor affinities later
- * on.
+ ** If MR_num_threads is unset, configure it to match the number of processors
+ ** on the system. If we do this, then we prepare to set processor
+ ** affinities later on.
*/
- if (MR_num_threads == 0)
- {
-#if defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
+ if (MR_num_threads == 0) {
+ #if defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN)
long result;
result = sysconf(_SC_NPROCESSORS_ONLN);
@@ -236,28 +236,30 @@
** automatically enable thread pinning. This prevents a runtime
** warning that could unnecessarily confuse the user.
**/
-#if defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
+ #if defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
/*
** Comment this back in to enable thread pinning by default if we
** autodetected the correct number of CPUs.
*/
/* MR_thread_pinning = MR_TRUE; */
-#endif
+ #endif
}
-#else /* ! defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN) */
+ #else /* ! defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN) */
MR_num_threads = 1;
-#endif /* ! defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN) */
+ #endif /* ! defined(MR_HAVE_SYSCONF) && defined(_SC_NPROCESSORS_ONLN) */
}
-#ifdef MR_LL_PARALLEL_CONJ
- MR_granularity_wsdeque_length = MR_granularity_wsdeque_length_factor * MR_num_threads;
-#endif
+ #ifdef MR_LL_PARALLEL_CONJ
+ MR_granularity_wsdeque_length =
+ MR_granularity_wsdeque_length_factor * MR_num_threads;
+ #endif
#endif /* MR_THREAD_SAFE */
}
/*
-** Pin the primordial thread first to the CPU it is currently using. (where
+** Pin the primordial thread first to the CPU it is currently using (where
** support is available).
*/
+
void
MR_pin_primordial_thread(void)
{
@@ -282,8 +284,7 @@
#else
MR_pin_thread();
#endif
-#endif /* defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
- */
+#endif
}
void
@@ -301,8 +302,7 @@
MR_next_cpu++;
}
MR_UNLOCK(&MR_next_cpu_lock, "MR_pin_thread");
-#endif /* defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
- */
+#endif
}
#if defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
@@ -317,7 +317,7 @@
if (sched_setaffinity(0, sizeof(cpu_set_t), &cpus) == -1) {
perror("Warning: Couldn't set CPU affinity: ");
/*
- ** If this failed once it will probably fail again so we
+ ** If this failed once, it will probably fail again, so we
** disable it.
*/
MR_thread_pinning = MR_FALSE;
@@ -328,8 +328,7 @@
MR_thread_pinning = MR_FALSE;
}
}
-#endif /* defined(MR_LL_PARALLEL_CONJ) && defined(MR_HAVE_SCHED_SETAFFINITY)
- */
+#endif
void
MR_finalize_thread_stuff(void)
@@ -364,6 +363,7 @@
** Therefore a text file is used since it has the advantage of being human
** readable.
*/
+
static void
MR_write_out_profiling_parallel_execution(void)
{
@@ -377,7 +377,8 @@
if (result < 0) goto Error;
if (MR_cpu_cycles_per_sec) {
- result = fprintf(file, "CPU cycles per second: %ld\n", MR_cpu_cycles_per_sec);
+ result = fprintf(file, "CPU cycles per second: %ld\n",
+ MR_cpu_cycles_per_sec);
if (result < 0) goto Error;
}
@@ -420,11 +421,11 @@
MR_profile_parallel_regular_context_kept);
if (result < 0) goto Error;
- if (0 != fclose(file)) goto Error;
+ if (fclose(file) != 0) goto Error;
return;
- Error:
+Error:
perror(MR_PROFILE_PARALLEL_EXECUTION_FILENAME);
abort();
}
@@ -442,7 +443,8 @@
MR_INTEGER_LENGTH_MODIFIER "ur, %" MR_INTEGER_LENGTH_MODIFIER "unr)\n")
static int
-fprint_stats(FILE *stream, const char *message, MR_Stats *stats) {
+fprint_stats(FILE *stream, const char *message, MR_Stats *stats)
+{
MR_Unsigned count;
double average;
double sum_squared_over_n;
@@ -1124,7 +1126,7 @@
MR_BEGIN_CODE
MR_define_entry(MR_do_runnext);
-#ifdef MR_THREAD_SAFE
+ #ifdef MR_THREAD_SAFE
{
MR_Context *ready_context;
MR_Code *resume_point;
@@ -1133,9 +1135,9 @@
MercuryThread thd;
struct timespec timeout;
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
MR_Timer runnext_timer;
-#endif
+ #endif
/*
** If this engine is holding onto a context, the context should not be
** in the middle of running some code.
@@ -1152,19 +1154,19 @@
MR_atomic_inc_int(&MR_num_idle_engines);
-#ifdef MR_THREADSCOPE
+ #ifdef MR_THREADSCOPE
MR_threadscope_post_looking_for_global_work();
-#endif
+ #endif
MR_LOCK(&MR_runqueue_lock, "MR_do_runnext (i)");
while (1) {
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
if (MR_profile_parallel_execution) {
MR_profiling_start_timer(&runnext_timer);
}
-#endif
+ #endif
if (MR_exit_now) {
/*
@@ -1195,12 +1197,12 @@
}
/* Nothing to do, go back to sleep. */
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
if (MR_profile_parallel_execution) {
MR_profiling_stop_timer(&runnext_timer,
&MR_profile_parallel_executed_nothing);
}
-#endif
+ #endif
MR_milliseconds_from_now(&timeout, MR_worksteal_sleep_msecs);
MR_TIMED_WAIT(&MR_runqueue_cond, &MR_runqueue_lock, &timeout,
@@ -1209,39 +1211,39 @@
/* unreachable */
abort();
- ReadyContext:
+ReadyContext:
/* Discard whatever unused context we may have and switch to tmp. */
if (MR_ENGINE(MR_eng_this_context) != NULL) {
-#ifdef MR_DEBUG_STACK_SEGMENTS
+ #ifdef MR_DEBUG_STACK_SEGMENTS
MR_debug_log_message("destroying old context %p",
MR_ENGINE(MR_eng_this_context));
-#endif
+ #endif
MR_destroy_context(MR_ENGINE(MR_eng_this_context));
}
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
if (MR_profile_parallel_execution) {
MR_profiling_stop_timer(&runnext_timer,
&MR_profile_parallel_executed_contexts);
}
-#endif
+ #endif
MR_ENGINE(MR_eng_this_context) = ready_context;
MR_load_context(ready_context);
-#ifdef MR_DEBUG_STACK_SEGMENTS
+ #ifdef MR_DEBUG_STACK_SEGMENTS
MR_debug_log_message("resuming old context: %p", ready_context);
-#endif
+ #endif
resume_point = (MR_Code*)(ready_context->MR_ctxt_resume);
ready_context->MR_ctxt_resume = NULL;
MR_GOTO(resume_point);
- ReadySpark:
+ReadySpark:
-#ifdef MR_DEBUG_STACK_SEGMENTS
+ #ifdef MR_DEBUG_STACK_SEGMENTS
MR_debug_log_message("stole spark: st: %p", spark.MR_spark_sync_term);
-#endif
+ #endif
-#if 0 /* This is a complicated optimisation that may not be worth-while */
+ #if 0 /* This is a complicated optimisation that may not be worthwhile */
if (!spark.MR_spark_sync_term->MR_st_is_shared) {
spark.MR_spark_sync_term_is_shared = MR_TRUE;
/*
@@ -1265,36 +1267,37 @@
MR_sched_yield();
}
}
-#endif
+ #endif
/* Grab a new context if we haven't got one then begin execution. */
if (MR_ENGINE(MR_eng_this_context) == NULL) {
MR_ENGINE(MR_eng_this_context) = MR_create_context("from spark",
MR_CONTEXT_SIZE_SMALL, NULL);
-#ifdef MR_THREADSCOPE
- MR_threadscope_post_create_context_for_spark(MR_ENGINE(MR_eng_this_context));
-#endif
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_THREADSCOPE
+ MR_threadscope_post_create_context_for_spark(
+ MR_ENGINE(MR_eng_this_context));
+ #endif
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
if (MR_profile_parallel_execution) {
MR_atomic_inc_int(
&MR_profile_parallel_contexts_created_for_sparks);
}
-#endif
+ #endif
MR_load_context(MR_ENGINE(MR_eng_this_context));
-#ifdef MR_DEBUG_STACK_SEGMENTS
+ #ifdef MR_DEBUG_STACK_SEGMENTS
MR_debug_log_message("created new context for spark: %p",
MR_ENGINE(MR_eng_this_context));
-#endif
+ #endif
} else {
-#ifdef MR_THREADSCOPE
+ #ifdef MR_THREADSCOPE
/*
** Allocate a new context Id so that someone looking at the threadscope
** profile sees this as new work.
*/
MR_ENGINE(MR_eng_this_context)->MR_ctxt_num_id = allocate_context_id();
MR_threadscope_post_run_context();
-#endif
+ #endif
}
MR_parent_sp = spark.MR_spark_sync_term->MR_st_parent_sp;
MR_SET_THREAD_LOCAL_MUTABLES(spark.MR_spark_thread_local_mutables);
@@ -1303,15 +1306,15 @@
MR_assert(MR_parent_sp != MR_sp);
MR_assert(spark.MR_spark_sync_term->MR_st_count > 0);
-#ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
+ #ifdef MR_PROFILE_PARALLEL_EXECUTION_SUPPORT
if (MR_profile_parallel_execution) {
MR_profiling_stop_timer(&runnext_timer,
&MR_profile_parallel_executed_global_sparks);
}
-#endif
+ #endif
MR_GOTO(spark.MR_spark_resume);
}
-#else /* !MR_THREAD_SAFE */
+ #else /* !MR_THREAD_SAFE */
{
if (MR_runqueue_head == NULL && MR_pending_contexts == NULL) {
MR_fatal_error("empty runqueue!");
@@ -1330,7 +1333,7 @@
MR_load_context(MR_ENGINE(MR_eng_this_context));
MR_GOTO(MR_ENGINE(MR_eng_this_context)->MR_ctxt_resume);
}
-#endif /* !MR_THREAD_SAFE */
+ #endif /* !MR_THREAD_SAFE */
MR_END_MODULE
@@ -1344,14 +1347,14 @@
MR_Context *this_context = MR_ENGINE(MR_eng_this_context);
/*
- * Atomically decrement and fetch the number of conjuncts yet to complete.
- * If we're the last conjunct to complete (the parallel conjunction is
- * finished) then jnc_last will be true.
+ ** Atomically decrement and fetch the number of conjuncts yet to complete.
+ ** If we're the last conjunct to complete (the parallel conjunction is
+ ** finished), then jnc_last will be true.
*/
/*
- * XXX: We should take the current TSC time here and use it to post the
- * various 'context stopped' threadscope events. This profile will be more
- * accurate.
+ ** XXX: We should take the current TSC time here and use it to post the
+ ** various 'context stopped' threadscope events. This will make the
+ ** profile more accurate.
*/
jnc_last = MR_atomic_dec_and_is_zero_uint(&(jnc_st->MR_st_count));
@@ -1364,9 +1367,9 @@
*/
return join_label;
} else {
-#ifdef MR_THREADSCOPE
+ #ifdef MR_THREADSCOPE
MR_threadscope_post_stop_context(MR_TS_STOP_REASON_FINISHED);
-#endif
+ #endif
/*
** This context didn't originate this parallel conjunction and
** we're the last branch to finish. The originating context should
@@ -1389,9 +1392,9 @@
MR_Code *spark_resume;
/*
- * The parallel conjunction it is not yet finished. Try to work on a
- * spark from our local stack. The sparks on our stack are likely to
- * cause this conjunction to be complete.
+ ** The parallel conjunction is not yet finished. Try to work on a
+ ** spark from our local stack. The sparks on our stack are likely to
+ ** cause this conjunction to be complete.
*/
popped = MR_wsdeque_pop_bottom(&this_context->MR_ctxt_spark_deque,
&spark_resume);
@@ -1408,19 +1411,22 @@
** Otherwise we can reuse this context for the next piece of work.
*/
if (this_context == jnc_st->MR_st_orig_context) {
-#ifdef MR_THREADSCOPE
+ #ifdef MR_THREADSCOPE
MR_threadscope_post_stop_context(MR_TS_STOP_REASON_BLOCKED);
-#endif
+ #endif
MR_save_context(this_context);
- /* XXX: Make sure the context gets saved before we set the join
- * label, use a memory barrier.*/
+ /*
+ ** XXX: Make sure the context gets saved before we set
+ ** the join label, use a memory barrier.
+ */
this_context->MR_ctxt_resume = (join_label);
MR_ENGINE(MR_eng_this_context) = NULL;
} else {
-#ifdef MR_THREADSCOPE
+ #ifdef MR_THREADSCOPE
MR_threadscope_post_stop_context(MR_TS_STOP_REASON_FINISHED);
-#endif
+ #endif
}
+
return MR_ENTRY(MR_do_runnext);
}
}
Index: mercury_wrapper.c
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/runtime/mercury_wrapper.c,v
retrieving revision 1.210
diff -u -b -r1.210 mercury_wrapper.c
--- mercury_wrapper.c 7 Oct 2010 23:38:44 -0000 1.210
+++ mercury_wrapper.c 1 Nov 2010 04:56:01 -0000
@@ -303,9 +303,9 @@
/*
** This is initialized to zero. If it is still zero after configuration of the
-** runtime but before threads are started then the number of processors on the
-** system is detected and used if support is available. Otherwise we fall back
-** to 1
+** runtime but before threads are started, then we set it to the number of
+** processors on the system (if support is available to detect this).
+** Otherwise, we fall back to 1.
*/
MR_Unsigned MR_num_threads = 0;
@@ -674,8 +674,8 @@
}
#ifdef MR_THREADSCOPE
/*
- ** TSC Synchronization is not used, support is commented out. See
- ** runtime/mercury_threadscope.h for an explanation.
+ ** TSC Synchronization is not used, support is commented out.
+ ** See runtime/mercury_threadscope.h for an explanation.
**
for (i = 1; i < MR_num_threads; i++) {
MR_threadscope_sync_tsc_master();
cvs diff: Diffing GETOPT
cvs diff: Diffing machdeps
--------------------------------------------------------------------------
mercury-reviews mailing list
Post messages to: mercury-reviews at csse.unimelb.edu.au
Administrative Queries: owner-mercury-reviews at csse.unimelb.edu.au
Subscriptions: mercury-reviews-request at csse.unimelb.edu.au
--------------------------------------------------------------------------
More information about the reviews
mailing list