Created
July 2, 2013 18:26
-
-
Save Pacifist117/5911781 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/*************************************************************************** | |
* Copyright (C) 2009-2012 Virginia Tech Real-Time Systems Lab * | |
* * | |
* Original version written by Matthew Dellinger * | |
* [email protected] * | |
* * | |
* Re-written by Aaron Lindsay * | |
* [email protected] * | |
* * | |
* This program is free software; you can redistribute it and/or modify * | |
* it under the terms of the GNU General Public License as published by * | |
* the Free Software Foundation; either version 2 of the License, or * | |
* (at your option) any later version. * | |
* * | |
* This program is distributed in the hope that it will be useful, * | |
* but WITHOUT ANY WARRANTY; without even the implied warranty of * | |
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * | |
* GNU General Public License for more details. * | |
* * | |
* You should have received a copy of the GNU General Public License * | |
* along with this program; if not, write to the * | |
* Free Software Foundation, Inc., * | |
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * | |
***************************************************************************/ | |
#include "task.h" | |
#include <librttest.h> | |
#include <unistd.h> | |
/* | |
* Update the locked and unlocked execution times for a task based on | |
* the total execution time for the task, and the percentage of that | |
* execution time we are supposed to hold the locks for (cs_length). | |
* Note: We also must modify the execution time by 'usage' percentage if | |
* we're not using the entire execution time. | |
*/ | |
void set_lock_usage(struct task *t, struct test *tester) | |
{ | |
double usage = ((double)tester->options->cpu_usage) / 100; | |
double cs = ((double)tester->options->cs_length) / 100; | |
t->unlocked_usage = t->exec_time * (usage - (usage * cs)); | |
t->locked_usage = t->exec_time * usage * cs; | |
} | |
/* | |
* Calculate the number of releases this task will have in the runtime specified. | |
*/ | |
void calculate_releases(struct task *t, struct test *tester) | |
{ | |
//runtime is set to the running time of the overall test in microseconds (options->run_time is in seconds) | |
unsigned int runtime = tester->options->run_time * MILLION; | |
/* | |
* The number of releases this task will have is the floor of the total runtime | |
* divided by this task's period. I.e. the number of tasks which we gather | |
* statistics for is the highest number of tasks which will fit into the | |
* runtime without going over it. | |
*/ | |
t->max_releases = runtime / t->period; | |
/* | |
* Keep track of when this division has a remainder. If it does, we set | |
* extra_release so we know we have to run this task one more time to ensure | |
* other tasks have the proper amount of utility when they are finishing their | |
* runs. | |
*/ | |
t->extra_release = ((runtime % t->period) != 0); | |
} | |
/* | |
* Get the abort pointer for the current thread - what we can check periodically | |
* to see if we have been aborted by the scheduler. | |
*/ | |
static void setup_aborts(struct task *t) | |
{ | |
#if KERN_CHRONOS | |
//printf("Kevin: chronos case\n"); | |
t->abort_pointer = get_abort_ptr(&t->tester->abort_data); | |
#else | |
//printf("Kevin: none chronos case\n"); | |
t->abort_pointer = (char *) malloc(32768); | |
int i = 0; | |
for (i=0;i<32768;i++) { | |
t->abort_pointer[i] = 0; | |
} | |
#endif | |
if (!t->abort_pointer) | |
fatal_error("Failed to initialize abort pointer for task."); | |
} | |
/* | |
* Find the next deadline for this task (based on the start time for all tasks, | |
* this task's period, and the job number of this task) and store the result in | |
* the supplied timespec struct. | |
*/ | |
static void find_job_deadline(struct task *t, struct timespec *deadline) | |
{ | |
unsigned long long nsec, carry; | |
unsigned long offset = (t->num_releases + 1) * t->period; //the offset from the start time this deadline is | |
struct timespec *start_time = t->tester->global_start_time; //the global start time for all tasks | |
nsec = start_time->tv_nsec + (unsigned long long)(offset * THOUSAND); | |
carry = nsec / BILLION; | |
deadline->tv_nsec = nsec % BILLION; | |
deadline->tv_sec = start_time->tv_sec + carry; | |
} | |
/* | |
* Initialize the HUA abort handler for this task, if applicable. | |
*/ | |
static void setup_hua_abort_handler(struct task *t) | |
{ | |
if (t->tester->options->enable_hua && t->hua_utility > 0) { | |
add_abort_handler_selfnodeadline(t->hua_utility, | |
t->tester->lock_time * | |
t->tester->num_locks); | |
} | |
} | |
/* | |
* Execute the workload, lock/unlock locks, update runtime statistics, etc. | |
* This function handles everything that needs to be done for a single instance | |
* of a task (a job). count_stats is true if we should accrue statistics from | |
* this run, and false if we are running this workload only to provide the | |
* proper amount of concurrent utility while the other runs finish. | |
*/ | |
static void task_instance_librt(struct task *t, int count_stats, period_param* my_period) | |
{ | |
struct timespec deadline, end_time; | |
long long tardiness; | |
int aborted = 0; //orred with the return value of the workload_do_work function calls | |
FILE * fp; | |
long pid,tid; | |
char * cmd; | |
char * fn_sched_from; | |
char * fn_sched_to; | |
int fp_sched_from; | |
int fp_sched_to; | |
char buf[2400]; | |
find_job_deadline(t, &deadline); //find the deadline for this taskset | |
setup_hua_abort_handler(t); | |
//begin_rtseg_self(TASK_RUN_PRIO, t->utility, &deadline, &t->period_ts, t->unlocked_usage + t->locked_usage); //enter real-time segment | |
rt_job_begin(TASK_RUN_PRIO, t->utility, &deadline, &t->period_ts, t->unlocked_usage + t->locked_usage, my_period, 0); //enter real-time segment | |
//long rt_job_begin (int prio, int max_util, struct timespec* deadline, struct timespec* period, unsigned long exec_time, period_param* periodic_timer, int isPeriodic) | |
//pid = getpid(); | |
//tid = gettid(); | |
tid = syscall(SYS_gettid); | |
pid = syscall(SYS_getpid); | |
if (t->tester->options->locking & NESTED_LOCKING) { //do nested locking, if applicable | |
int last, lock_num; | |
for (lock_num = 0; lock_num < t->num_my_locks; lock_num++) { | |
//if (chronos_mutex_lock(t->my_locks[lock_num]) == -1) | |
if (mutex_lock(t->my_locks[lock_num]) == -1) | |
break; | |
else | |
last = lock_num; | |
} | |
aborted |= workload_do_work(t, t->locked_usage); | |
for (lock_num = last; lock_num >= 0; lock_num--) { | |
//chronos_mutex_unlock(t->my_locks[lock_num]); | |
mutex_unlock(t->my_locks[lock_num]); | |
} | |
} else if (t->tester->options->locking & LOCKING) { //do non-nested locking, if applicable | |
int lock_num; | |
for (lock_num = 0; lock_num < t->num_my_locks; lock_num++) { | |
//if (chronos_mutex_lock(t->my_locks[lock_num]) == -1) | |
if (mutex_lock(t->my_locks[lock_num]) == -1) | |
break; | |
aborted |= workload_do_work(t, t->locked_usage); | |
//chronos_mutex_unlock(t->my_locks[lock_num]); | |
mutex_unlock(t->my_locks[lock_num]); | |
} | |
} | |
aborted |= workload_do_work(t, t->unlocked_usage); //do unlocked workload time | |
clock_gettime(CLOCK_REALTIME, &end_time); //get the endtime | |
//end_rtseg_self(TASK_CLEANUP_PRIO); //end the real-time segment | |
rt_job_end(TASK_CLEANUP_PRIO); | |
fp = fopen("../logs/pids","a+"); | |
//printf("%ld %ld %ld %ld\n",pid,tid,getpid(),gettid()); | |
fprintf(fp,"%ld\n",tid); | |
fclose(fp); | |
//cmd = malloc(sizeof("cp /proc/%ld/sched /home/kevinpb/RTL/tests/librttest_version/sched_test_app_2.6/logs/proc/%ld") + 1); | |
//sprintf(cmd,"cp /proc/%ld/sched /home/kevinpb/RTL/tests/librttest_version/sched_test_app_2.6/logs/proc/%ld",tid,tid); | |
fn_sched_from = (char *)malloc(sizeof "/proc/%ld/sched"); | |
fn_sched_to = (char *)malloc(sizeof "/home/kevinpb/RTL/tests/librttest_version/" | |
"sched_test_app_2.6/logs/proc/%ld"); | |
sprintf(fn_sched_from,"/proc/%ld/sched",tid); | |
sprintf(fn_sched_to,"/home/kevinpb/RTL/tests/librttest_version/" | |
"sched_test_app_2.6/logs/proc/%ld",tid); | |
fp_sched_from = open(fn_sched_from,O_RDONLY); | |
fp_sched_to = open(fn_sched_to,O_WRONLY | O_CREAT | O_EXCL, 0666); | |
ssize_t nread; | |
while (nread = read(fp_sched_from, buf, sizeof buf), nread > 0) | |
{ | |
char *out_ptr = buf; | |
ssize_t nwritten; | |
do { | |
nwritten = write(fp_sched_to, out_ptr, nread); | |
if (nwritten >= 0) | |
{ | |
nread -= nwritten; | |
out_ptr += nwritten; | |
} | |
} while (nread > 0); | |
} | |
//system(cmd); | |
//free(cmd); | |
close(fp_sched_from); | |
close(fp_sched_to); | |
free(fn_sched_from); | |
free(fn_sched_to); | |
/* | |
* Is this run for the sole purpose of making sure the other 'real' runs have | |
* the appropriate amount of concurrent utility? If it is, return and don't | |
* count the statistics for this run. | |
*/ | |
if (!count_stats) | |
return; | |
tardiness = timespec_subtract_us(&deadline, &end_time); //calculate tardiness from deadline and endtime | |
//tabulate statistics about this run | |
if (aborted) { //have we been aborted? | |
t->num_aborted++; | |
} else if (tardiness >= 0) { //otherwise, did we meet our deadline? | |
t->deadlines_met++; //increment deadlines_met, if we met ours | |
t->utility_accrued += t->utility; //add to utility_accrued however much we accrued | |
//sleep for however much time remains before the next release of this task | |
if (tardiness > 0) { | |
#if KERN_LITMUS | |
struct timespec ts; | |
ts.tv_nsec = tardiness*1000; | |
ts.tv_sec = tardiness/1000000; | |
nanosleep(&ts,NULL); | |
#else | |
usleep(tardiness); | |
#endif | |
} | |
} else { //if we got here, we blew our deadline? | |
//figure out if our tardiness was worse than anyone else's so far | |
if (tardiness < t->max_tardiness) //this is reverse from what it ought to be | |
t->max_tardiness = tardiness; | |
} | |
//TODO maybe keep a figure on average tardiness? (would this be useful?) | |
} | |
static void task_instance(struct task *t, int count_stats) | |
{ | |
struct timespec deadline, end_time; | |
long long tardiness; | |
int aborted = 0; //orred with the return value of the workload_do_work function calls | |
find_job_deadline(t, &deadline); //find the deadline for this taskset | |
setup_hua_abort_handler(t); | |
begin_rtseg_self(TASK_RUN_PRIO, t->utility, &deadline, &t->period_ts, t->unlocked_usage + t->locked_usage); //enter real-time segment | |
if (t->tester->options->locking & NESTED_LOCKING) { //do nested locking, if applicable | |
int last, lock_num; | |
for (lock_num = 0; lock_num < t->num_my_locks; lock_num++) { | |
if (mutex_lock(t->my_locks[lock_num]) == -1) | |
break; | |
else | |
last = lock_num; | |
} | |
aborted |= workload_do_work(t, t->locked_usage); | |
for (lock_num = last; lock_num >= 0; lock_num--) { | |
mutex_unlock(t->my_locks[lock_num]); | |
} | |
} else if (t->tester->options->locking & LOCKING) { //do non-nested locking, if applicable | |
int lock_num; | |
for (lock_num = 0; lock_num < t->num_my_locks; lock_num++) { | |
if (mutex_lock(t->my_locks[lock_num]) == -1) | |
break; | |
aborted |= workload_do_work(t, t->locked_usage); | |
mutex_unlock(t->my_locks[lock_num]); | |
} | |
} | |
aborted |= workload_do_work(t, t->unlocked_usage); //do unlocked workload time | |
clock_gettime(CLOCK_REALTIME, &end_time); //get the endtime | |
end_rtseg_self(TASK_CLEANUP_PRIO); //end the real-time segment | |
/* | |
* Is this run for the sole purpose of making sure the other 'real' runs have | |
* the appropriate amount of concurrent utility? If it is, return and don't | |
* count the statistics for this run. | |
*/ | |
if (!count_stats) | |
return; | |
tardiness = timespec_subtract_us(&deadline, &end_time); //calculate tardiness from deadline and endtime | |
//tabulate statistics about this run | |
if (aborted) { //have we been aborted? | |
t->num_aborted++; | |
} else if (tardiness >= 0) { //otherwise, did we meet our deadline? | |
t->deadlines_met++; //increment deadlines_met, if we met ours | |
t->utility_accrued += t->utility; //add to utility_accrued however much we accrued | |
//sleep for however much time remains before the next release of this task | |
if (tardiness > 0) { | |
#if KERN_LITMUS | |
struct timespec ts; | |
ts.tv_nsec = tardiness*1000; | |
ts.tv_sec = tardiness/1000000; | |
nanosleep(&ts,NULL); | |
#else | |
usleep(tardiness); | |
#endif | |
} | |
} else { //if we got here, we blew our deadline? | |
//figure out if our tardiness was worse than anyone else's so far | |
if (tardiness < t->max_tardiness) //this is reverse from what it ought to be | |
t->max_tardiness = tardiness; | |
} | |
//TODO maybe keep a figure on average tardiness? (would this be useful?) | |
} | |
/* | |
* The function called from pthread_create to start a task. This function is | |
* ultimately responsible for entering and exiting real-time segments, calling | |
* the appropriate workload for the correct amount of time, and leaving the | |
* segment, then recording scheduling statistics. | |
*/ | |
void *start_task(void *arg) | |
{ | |
struct sched_param param; | |
struct task *t = (struct task *)arg; | |
//do some initialization before we actually signal we're ready to start our real-time task | |
t->thread_id = gettid(); //get our thread's tid | |
//kevin | |
setup_aborts(t); //grab our pointer which we can query to make sure we're not aborted yet | |
//set the affinity of this thread to whatever was specified in the taskset file | |
if(sched_setaffinity(0, sizeof(t->cpu_mask), (cpu_set_t *) &t->cpu_mask)) | |
fatal_error("Failed to set processor affinity of a task."); | |
workload_init_task(t); //initialize any local data the workload needs | |
//increase priority to TASK_START_PRIO | |
param.sched_priority = TASK_START_PRIO; | |
pthread_setschedparam(pthread_self(), SCHED_FIFO, ¶m); | |
pthread_barrier_wait(&t->tester->barrier); //wait on the barrier for all threads to arrive | |
//get the start time and CAS on the start-time so all the threads agree on when they got here | |
clock_gettime(CLOCK_REALTIME, &t->local_start_time); | |
//Note: the next line is a gcc-specific CAS extension. This is NOT portable to a different compiler. (FIXME?) | |
__sync_bool_compare_and_swap(&t->tester->global_start_time, 0, | |
&t->local_start_time); | |
/* for each release of this task, call task_instance to do the heavy-lifting | |
* (actually execute the workload, lock/unlock locks, update runtime | |
* statistics, etc.) | |
*/ | |
for (t->num_releases = 0; t->num_releases < t->max_releases; | |
t->num_releases++) | |
task_instance(t, 1 /*count the statistics for this run */ ); | |
/* | |
* Run the task one period after it technically hit its last one, but don't | |
* measure the statistics. This makes sure all other tasks get the full load | |
* for all of their periods. | |
*/ | |
if (t->extra_release) | |
task_instance(t, 0 /*DON'T count the statistics */ ); | |
pthread_exit(NULL); | |
} | |
/********************************************************** | |
// librt versions | |
**********************************************************/ | |
/*
 * librt variant of the pthread_create entry point for a task. Performs
 * per-thread setup (tid, abort pointer, CPU affinity, workload data),
 * synchronizes all task threads on a barrier, registers the task as an
 * rt_task, then runs every release via task_instance_librt() and tears
 * the rt_task down again.
 */
void *start_task_librt(void *arg)
{
	struct sched_param param;
	struct task *t = (struct task *)arg;
	//do some initialization before we actually signal we're ready to start our real-time task
	t->thread_id = gettid(); //get our thread's tid
	//kevin
	setup_aborts(t); //grab our pointer which we can query to make sure we're not aborted yet
	//set the affinity of this thread to whatever was specified in the taskset file
	if(sched_setaffinity(0, sizeof(t->cpu_mask), (cpu_set_t *) &t->cpu_mask))
		fatal_error("Failed to set processor affinity of a task.");
	workload_init_task(t); //initialize any local data the workload needs
	//increase priority to TASK_START_PRIO
	param.sched_priority = TASK_START_PRIO;
	pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
	pthread_barrier_wait(&t->tester->barrier); //wait on the barrier for all threads to arrive
	//get the start time and CAS on the start-time so all the threads agree on when they got here
	clock_gettime(CLOCK_REALTIME, &t->local_start_time);
	//Note: the next line is a gcc-specific CAS extension. This is NOT portable to a different compiler. (FIXME?)
	__sync_bool_compare_and_swap(&t->tester->global_start_time, 0,
				     &t->local_start_time);
	/* for each release of this task, call task_instance to do the heavy-lifting
	 * (actually execute the workload, lock/unlock locks, update runtime
	 * statistics, etc.)
	 */
	period_param my_period;
	cpu_set_t cpu;
	CPU_ZERO(&cpu);
	//NOTE(review): CPU_SET expects a CPU *index*, but cpu_mask is used as a
	//bit mask in sched_setaffinity above - confirm this is intended.
	CPU_SET(t->cpu_mask,&cpu); //set in task_set?
	//NOTE(review): only period_ts.tv_nsec is passed as the period; a period
	//with a nonzero tv_sec part would be truncated - verify rt_task_begin's
	//expected units against its declaration (commented below).
	//int rt_task_begin (RTIME exec_cost, RTIME period, period_param* my_period, int isPeriodic, int thread_id, cpu_set_t cpus, task t) {
	rt_task_begin (t->unlocked_usage + t->locked_usage , t->period_ts.tv_nsec, &my_period, 0, t->thread_id, cpu,&t->thisTask);
	for (t->num_releases = 0; t->num_releases < t->max_releases;
	     t->num_releases++)
		task_instance_librt(t, 1 /*count the statistics for this run */ ,&my_period);
	/*
	 * Run the task one period after it technically hit its last one, but don't
	 * measure the statistics. This makes sure all other tasks get the full load
	 * for all of their periods.
	 */
	if (t->extra_release)
		task_instance_librt(t,0 /*DON'T count the statistics */,&my_period );
	rt_task_end(&my_period, 0);
	workload_cleanup_task(t); //clean up any local data for the workload
	//printf("Kevin [%s]: end of start_task_librt\n",__FUNCTION__);
	return NULL;
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment