// Copyright (c) 2014 Carnegie Mellon University.
// All Rights Reserved.
// Redistribution and use in source and binary forms,
// with or without modification, are permitted provided that the
// following conditions are met:
// 1. Redistributions of source code must retain the above
// copyright notice, this list of conditions and the following
// acknowledgments and disclaimers.
// 2. Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// acknowledgments and disclaimers in the documentation and/or
// other materials provided with the distribution.
// 3. Products derived from this software may not include
// “Carnegie Mellon University,” "SEI” and/or “Software
// Engineering Institute" in the name of such derived product,
// nor shall “Carnegie Mellon University,” "SEI” and/or
// “Software Engineering Institute" be used to endorse or
// promote products derived from this software without prior
// written permission. For written permission, please contact
// permission@sei.cmu.edu.
// ACKNOWLEDGMENTS AND DISCLAIMERS:
// This material is based upon work funded and supported by the
// Department of Defense under Contract No. FA8721-05-C-0003
// with Carnegie Mellon University for the operation of the
// Software Engineering Institute, a federally funded research
// and development center.
// Any opinions, findings and conclusions or recommendations
// expressed in this material are those of the author(s) and do
// not necessarily reflect the views of the United States
// Department of Defense.
// References herein to any specific commercial product,
// process, or service by trade name, trade mark, manufacturer,
// or otherwise, does not necessarily constitute or imply its
// endorsement, recommendation, or favoring by Carnegie Mellon
// University or its Software Engineering Institute.
// NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE
// ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN “AS-IS”
// BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY
// KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER
// INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
// PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED
// FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT
// MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM
// PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
// This material has been approved for public release and
// unlimited distribution.
// Carnegie Mellon® is registered in the U.S. Patent and
// Trademark Office by Carnegie Mellon University.
// DM-0001283
//
// What does this program do?
// It performs schedulability analysis of constrained-deadline sporadic tasks with co-runner dependent
// execution times.
//
// How do I install it?
// Make sure that you have 64-bit Linux. Then install Gurobi. It is free for academic use.
// Compile the program to link with the Gurobi library as follows:
// gcc -m64 -g -o schedanalysiscorunner schedanalysiscorunner.c -I/home/bjorn/gurobi/gurobi701/linux64/include -O3 -lm -L/home/bjorn/gurobi/gurobi701/linux64/lib -lgurobi70
// Now you have an executable file schedanalysiscorunner.
//
// How do I actually run it?
// Open a console. Then type:
// schedanalysiscorunner -i taskset.txt -o taskset_results_from_analysis.txt
// With this command, you tell the tool to read the file taskset.txt, perform schedulability analysis, and write the output of the analysis to
// the file taskset_results_from_analysis.txt.
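//
// Note (explained in more detail under "Some history of this source code" below): in the current
// version, readtasksetfromfile() expects the input to be a gzip-compressed text file; it appends
// ".gz" to the name given with -i, so the command above actually opens taskset.txt.gz.
// One way to prepare the input, assuming you start from a plain text file taskset.txt in the
// format described below, is:
// gzip taskset.txt
// schedanalysiscorunner -i taskset.txt -o taskset_results_from_analysis.txt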
//
// What is the format of the input file?
// The input file is a text file with numbers and these numbers are not comma separated.
// An example of an input file is as follows:
// 1
// 2
// 3
// 1
// 3
// 2
// 508.827494 51.582220
// 1
// 1
// 0.508827
// 0.050000
// 1
// 1 3 1 0.727217
// 2
// 1
// 2
// 2873.485391 1414.746314
// 1
// 1
// 4.000429
// 0.050000
// 1
// 1 3 1 0.616589
// 3
// 2
// 1
// 231.911587 123.257014
// 1
// 1
// 4.074487
// 0.050000
// 2
// 1 1 1 0.965866
// 1 2 1 0.915646
//
// The first number in the file is "1". This is the id of the method that is used. So far, this is the only method supported by this tool. So this
// number should always be "1".
// The second number in the file is "2". This is the number of processors.
// The third number in the file is "3". This is the number of tasks.
// Then follow blocks of parameters, one block for each task.
// The 4th number in the file is "1". It tells us that the block of parameters that we now describe is for the task with index 1.
// The 5th number in the file is "3". It tells us that the task we now describe has priority 3. (A high number means a high priority.)
// The 6th number in the file is "2". It tells us that the task we now describe is assigned to processor 2. (Processors have indices starting with 1.)
// The 7th and 8th numbers in the file are "508.827494" and "51.582220" respectively. They are the T parameter and the D parameter.
// The 9th number in the file is "1". It tells us that the task we now describe has 1 segment.
// The 10th number in the file is "1". It tells us that the segment we now describe has id 1.
// The 11th number in the file is "0.508827". It tells us the execution requirement of this segment.
// The 12th number in the file is "0.050000". It tells us the pd parameter of this segment.
// The 13th number in the file is "1". It tells us the cardinality of the set CO of this segment.
// The 14th, 15th, 16th and 17th numbers in the file are "1", "3", "1" and "0.727217" respectively. They tell us that for this segment,
// there is a co-runner set of size 1 and that this co-runner set has one member: segment 1 of task 3.
// They also tell us that if this segment experiences this co-runner set at run-time then the segment has progress factor 0.727217.
// The file continues with a block for task 2 and then a block for task 3.
//
// You can learn more about the input file format by reading the function "void readtasksetfromfile(char* fn)" (and the helpers it calls) in this source code file.
//
// How to use this tool in batch mode?
// schedanalysiscorunner -g -r -s
//
// How to run many experiments?
// Use the meta mode -m
//
// How to generate a taskset from building blocks of measurements?
// schedanalysiscorunner -p measurements.txt
//
// Why is there a "#define COMPILEMSVS 0"?
// Because a previous version of this software was written so that it could also be compiled under Microsoft Visual Studio. The current version
// of this software has only been tested for 64-bit Linux and hence it should have "#define COMPILEMSVS 0". If you want to compile for Microsoft Visual Studio
// then set "#define COMPILEMSVS 1". And you probably have to spend some more effort to make it work --- not recommended; that has not been tested.
// If you really want to compile this with Microsoft Visual Studio and run it under Windows, here is my advice:
// Read "40. How do I configure a new Gurobi C++ project with Microsoft Visual Studio 2010?"
// at http://www.gurobi.com/resources/faqs.
// It states:
// It's easiest to modify one of the existing projects in the examples\build subdirectory. However, if you prefer to create a new project from scratch, here are the key steps:
//
// Under the File menu, select New > Project...; on the Installed Templates panel, select Other Languages > Visual C++ > Win32, then select Win32 Console Application. For illustration, let's call it gurobitest. Press the Finish button to close the Wizard.
// To add an existing source file:
// Under Source Files below the project name in the Solution Explorer panel, right click on the initial source file that Visual Studio adds to the project (gurobitest.cpp in our example). Select Remove, then press Delete.
// Right-click on the project name in the Solution Explorer panel, then select Add > Existing Item..., then choose your C++ source file.
// Right-click on the project name in the Solution Explorer panel, then select Properties.
// Under C/C++ / Precompiled Headers / Precompiled Header, select Not Using Precompiled Headers.
// For 32-bit Gurobi libraries:
// Under C/C++ / General / Additional Include Directories, add: c:\gurobi550\win32\include
// Under Linker / General / Additional Library Directories, add: c:\gurobi550\win32\lib
// For 64-bit Gurobi libraries:
// Under C/C++ / General / Additional Include Directories, add: c:\gurobi550\win64\include
// Under Linker / General / Additional Library Directories, add: c:\gurobi550\win64\lib
// Press the Configuration Manager... button. Under Active solution platform, select New. Set the new platform to x64, and press OK.
// Under Linker / Input / Additional Dependencies, add gurobi55.lib and gurobi_c++mdd2010.lib
//
// What are the limitations of this tool?
// Look at the parameters
// #define MAXNTASKS 16 // 128 // this needs to be set to 128 when we run the experiments for the table to check scalability
// #define MAXNPROCESSORS 8 // 12 // this needs to be set to 12 when we run the experiments for the table to check scalability
// #define MAXNSEGMENTSPERTASK 2
// #define MAXNSEGMENTSINCORUNNERSETSPECIFIED ((MAXNTASKS-1)*MAXNSEGMENTSPERTASK)
// #define MAXNCORUNNERSETSSPECIFIED 100000 // 1000000
// #define MAXNSEGMENTSETREFS 5000000
// #define MAXNSEGMENTSETS 500000 // 5000000
// #define BUCKETSIZE 5000
// #define MAPSIZE 50000 // 500000
// #define NSUBSETS 500000 // 5000000
// below in this source file. If you run large experiments, you may need to increase these values.
//
// How fast is this tool?
// For a system with 8 processors and 10 tasks, with each task having 2 segments, the schedulability analysis finishes within 40 seconds.
//
// When running experiments with -m (meta mode), what should I do?
// 1. unplug the ethernet cable
// 2. disconnect WiFi
// 3. disable networking (in Ubuntu)
// 4. disable getting time from the Internet. In Ubuntu, click "System settings", click "Time and date", and uncheck the box "Automatically from the Internet".
// Advice when running -m:
// Disable the disk write cache with
// sudo hdparm -W0 /dev/sda1
//
// Some status information about running time:
// When running schedanalysiscorunner -m,
// after 4.5 days it has generated tasksets up to meta314 (that is, 122 directories of the type metaXXX).
// Each meta directory consumes 3.5 GB.
// So in total, it has consumed 3.5 GB * 122 = 427 GB.
// Note that in order to finish, it needs to run for 200/122, i.e., about 1.6 times as long in total.
// That is, in order to finish taskset generation, we could expect it to take about 8 days and consume about 700 GB.
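//
// For reference, the batch-mode flags mentioned above under "How to use this tool in batch mode?"
// map to the following steps (see the comments in parsearguments below); this is only a sketch of
// the intended workflow:
// schedanalysiscorunner -g   generate tasksets (written under experiments/experiment1/taskset<N>)
// schedanalysiscorunner -r   run the schedulability analysis on the generated tasksets
// schedanalysiscorunner -s   gather statistics from the results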
//
// Some history of this source code:
// As mentioned above, this program can be executed in two different ways: (i) meta mode,
// where it generates a large number of tasksets (each written to a file) and then analyzes
// each of them, and (ii) normal mode, where the tool analyzes a single taskset and the file
// name of this single taskset is given on the command line. Initially, when the program
// read a taskset from file, the file was a text file that was not compressed. This worked fine
// for normal mode and it also worked for meta mode when the size was small. But then I
// changed the meta mode so that it generated a large number of files. It took more than
// 2 TB and this was more than the hard drive space that I had. Therefore, I rewrote the code
// so that when it writes a taskset to file, it compresses the taskset, and when it reads
// a taskset from file, it assumes a compressed taskset and decompresses it.
// This worked fine for meta mode and this is what I used for the TECS paper.
// Unfortunately, this had the side effect that now, when using the tool in normal mode,
// the taskset must be in a text file that is compressed. On August 6, 2018, I realized
// that this makes the tool more difficult to use for software practitioners.
// For a software practitioner, there are two ways to deal with this. One way is to
// just accept the tool the way it is; then you have to compress your system.txt
// file before giving it to this tool. Another way is to make a minor modification to
// the tool so that you can use it in normal mode with an uncompressed file. In order
// to do that, change the function readtasksetfromfile.

#define COMPILEMSVS 0

#if COMPILEMSVS
#else
#include "gurobi_c.h"
#endif

// We add these in order to do file I/O like in UNIX. And ceil(..)
#include <stdio.h>
#include <fcntl.h>
#include <math.h>
// we add this in order to call the function time( ..)
#if COMPILEMSVS
#include <windows.h>
#endif
#include <time.h>
#if COMPILEMSVS
#include <io.h>
#endif
#include <sys/types.h>
#include <sys/stat.h> // this is needed for the constant S_IWRITE
#include <string.h> // this is needed for strcmp
#if COMPILEMSVS
#include <direct.h>
#else
#include <unistd.h>
#endif
#include <stdlib.h>
#include <stdint.h>

#define MAX_ITERATOR_IN_BATCH 100
#define MAXNTASKS 16
#define MAXNPROCESSORS 8
#define MAXNSEGMENTSPERTASK 2
#define MAXNSEGMENTSINCORUNNERSETSPECIFIED ((MAXNTASKS-1)*MAXNSEGMENTSPERTASK)
#define MAXNCORUNNERSETSSPECIFIED 115000 // if we use 100000 then it is not enough. 200000 is enough. 150000 is enough. 130000 seems to be enough.
#define MAXNSEGMENTSETREFS 500000
#define MAXNSEGMENTSETS 500000
#define BUCKETSIZE 100 // 5000 seems to be enough. 2000 seems to be enough. 500 seems to be enough.
#define MAPSIZE 50000
#define NSUBSETS 500000

// The values below are used when we run experiments to get the table that shows scalability
/*
#define MAX_ITERATOR_IN_BATCH 10
#define MAXNTASKS 128
#define MAXNPROCESSORS 12
#define MAXNSEGMENTSPERTASK 2
#define MAXNSEGMENTSINCORUNNERSETSPECIFIED ((MAXNTASKS-1)*MAXNSEGMENTSPERTASK)
#define MAXNCORUNNERSETSSPECIFIED 1000000
#define MAXNSEGMENTSETREFS 5000000
#define MAXNSEGMENTSETS 5000000
#define BUCKETSIZE 5000
#define MAPSIZE 500000
#define NSUBSETS 5000000
*/

int enablelogging = 0;
int counterforILPsolver = 0;
int generate_experiments = 0;
int run_experiments = 0;
int generate_statistics = 0;
int metathing = 0;
int producetasksetsfromparts = 0;
char inputfilename[20000]="taskset.txt";
char outputfilename[20000]="taskset_results_from_analysis.txt";
char producetasksetsfrompartsfilename[20000]="measurements.txt";

struct segmentincorunnerset {
  int16_t taskindex;
  int16_t segmentindex;
};

struct corunnersetspecifiedstruct {
  int nsegmentsincorunnerset;
  struct segmentincorunnerset segmentsincorunnerset[MAXNSEGMENTSINCORUNNERSETSPECIFIED+1]; // there is no element in index 0.
  double progresscorunnerset;
};

struct segmentset_ref {
  int index_of_segmentset;
  double pw;
};

struct segmentset {
  int16_t taskindices[MAXNPROCESSORS];
  int16_t segmentindices[MAXNPROCESSORS];
};

struct segment {
  int segid;
  double C;
  double pd;
  int ncorunnersetsspecified;
  struct corunnersetspecifiedstruct* corunnersetsspecified; // there is no element in index 0.
  int partid; // this is only used in the mode "generate from parts"
  int nsegmentsets; // this is used in the new version of analysis (which consumes less memory)
  struct segmentset_ref* segmentset_refs;
  int pos; // this is used to speed up hashing
};

struct task {
  int id;
  int priority;
  int proc;
  double T;
  double D;
  int nsegments;
  struct segment segments[MAXNSEGMENTSPERTASK+1]; // there is no element in index 0.
  int valid; // this is computed by the MILP
  double RUB; // this is computed by the MILP
  int valid_baselinemethod; // this is computed by the baseline response time calculation
  double RUB_baselinemethod; // this is computed by the baseline response time calculation
};

int useanalysismethod;
int nprocessors;
int ntasks;
struct task tasks[MAXNTASKS+1]; // there is no element in index 0.
int number_of_segments_on_processor[MAXNPROCESSORS];
int nsegmentsets;
struct segmentset* segmentsets;

struct mapitem {
  int nelements_in_bucket;
  int16_t* taskindices;
  int16_t* segmentindices;
  int32_t* corindices;
};

struct mapitem* mapitems;

int16_t template_localprocessor_taskindices[MAXNPROCESSORS];
int16_t template_localprocessor_segmentindices[MAXNPROCESSORS];

double timerequiredforschedulabilityanalysis_newmethod;
double timerequiredforschedulabilityanalysis_baselinemethod;

int nsg = 2; // this means "number of segments per task" and it is used in the meta mode.
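// A short orientation for readers of the data structures above (the authoritative definitions are
// in the paper "Schedulability Analysis of Tasks with Co-Runner Dependent Execution Times",
// mentioned at req_basic below): a task consists of nsegments segments; each segment has an
// execution requirement C and a list of specified co-runner sets. A specified co-runner set is a
// set of segments of tasks assigned to other processors, together with a progress factor; if the
// segment runs in parallel with exactly that set of segments, it progresses at that rate, so,
// roughly, executing the whole segment under that co-runner set takes C divided by the progress
// factor. A segmentset records, for each processor, at most one segment (-1 meaning none) that may
// execute in parallel, and getpw() returns the progress factor of a given segment under a given
// segmentset.
//
// As an illustration, the first task of the example input file at the top of this file would be
// read into these structures as follows (values copied from that example; this listing is for
// illustration only and is not used by the code):
// tasks[1].id = 1; tasks[1].priority = 3; tasks[1].proc = 2;
// tasks[1].T = 508.827494; tasks[1].D = 51.582220; tasks[1].nsegments = 1;
// tasks[1].segments[1].segid = 1; tasks[1].segments[1].C = 0.508827;
// tasks[1].segments[1].pd = 0.050000; tasks[1].segments[1].ncorunnersetsspecified = 1;
// tasks[1].segments[1].corunnersetsspecified[1].nsegmentsincorunnerset = 1;
// tasks[1].segments[1].corunnersetsspecified[1].segmentsincorunnerset[1].taskindex = 3;
// tasks[1].segments[1].corunnersetsspecified[1].segmentsincorunnerset[1].segmentindex = 1;
// tasks[1].segments[1].corunnersetsspecified[1].progresscorunnerset = 0.727217;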
struct measurement { int tid; int cor1; int cor2; int cor3; double max_exec; double min_exec; double avg_exec; }; int nmeasurements; struct measurement measurements[100000]; GRBenv *env = NULL; GRBmodel *model = NULL; int error = 0; double* sol; int nelements_of_constr; int* ind; double* val; int nelements_of_obj; double* obj; char* vtype; int optimstatus; double objval; void dothememoryallocation_for_Gurobi_stuff() { sol = (double*) malloc( NSUBSETS * sizeof(double)); if (sol==NULL) { fprintf(stderr,"malloc failure in dothememoryallocation: sol\n"); exit(-1); } ind = (int*) malloc( NSUBSETS * sizeof(int)); if (ind==NULL) { fprintf(stderr,"malloc failure in dothememoryallocation: ind\n"); exit(-1); } val = (double*) malloc( NSUBSETS * sizeof(double)); if (val==NULL) { fprintf(stderr,"malloc failure in dothememoryallocation: val\n"); exit(-1); } obj = (double*) malloc( NSUBSETS * sizeof(double)); if (obj==NULL) { fprintf(stderr,"malloc failure in dothememoryallocation: obj\n"); exit(-1); } vtype = (char*) malloc( NSUBSETS * sizeof(char)); if (vtype==NULL) { fprintf(stderr,"malloc failure in dothememoryallocation: vtype\n"); exit(-1); } } void freethememory_for_Gurobi_stuff() { free( sol); free( ind); free( val); free( obj); free( vtype); } void allocate_global_variables() { int index; int i; int k; mapitems = malloc(sizeof(struct mapitem)*MAPSIZE); if (mapitems==NULL) { printf("In allocate_global_variables. Memory allocation failure. mapitems."); exit(-1); } for (index=0;indext2) { return t1; } else { return t2; } } int minint2(int t1, int t2) { if (t1t2) { return t1; } else { return t2; } } // MS Visual studio does not allow write. So we call this function mywrite that in turn // calls _write. int mywrite( int fh, char* tempstr, int len) { #if COMPILEMSVS return _write( fh, tempstr, len ); #else return write( fh, tempstr, len ); #endif } int myopen( char* fn, int m1, int m2) { #if COMPILEMSVS return _open( fn, m1, m2); #else return open( fn, m1, m2); #endif } int myclose( int fh) { #if COMPILEMSVS return _close( fh); #else return close( fh); #endif } int mychdir( char* p) { #if COMPILEMSVS return _chdir( p); #else return chdir( p); #endif } int mymakedirectory( char* p) { char tempstr[1000]; #if COMPILEMSVS return mkdir( p, 0700); #else return mkdir( p, 0700); #endif } int myrandom() { #if COMPILEMSVS return rand(); #else return rand(); #endif } double mydrand48() { int temp; int temp2; double temp3; double temp4; #if COMPILEMSVS temp = rand(); temp2 = temp % 2000; temp3 = temp2; temp4 = (1+temp3)/(2000.0+1.0); return temp4; #else return drand48(); #endif } void mydeletefile( char* p) { char tempstr[1000]; #if COMPILEMSVS sprintf( tempstr, "del %s", p); system(tempstr); #else sprintf( tempstr, "rm %s", p); system(tempstr); #endif } double getutiloftask(int i) { int s; double Ctot; Ctot = 0.0; for (s=1;s<=tasks[i].nsegments;s++) { Ctot = Ctot + tasks[i].segments[s].C; } return Ctot/tasks[i].T; } double getutilofprocessor(int p) { int i; double usum; usum = 0.0; for (i=1;i<=ntasks;i++) { if (tasks[i].proc!=-1) { if (tasks[i].proc==p) { usum = usum + getutiloftask(i); } } } return usum; } int istasksetguaranteedtobeschedulable() { int i; for (i=1;i<=ntasks;i++) { if (!tasks[i].valid) { return 0; } else { if (tasks[i].RUB>tasks[i].D) { return 0; } } } return 1; } int istasksetguaranteedtobeschedulable_baselinemethod() { int i; for (i=1;i<=ntasks;i++) { if (!tasks[i].valid_baselinemethod) { return 0; } else { if (tasks[i].RUB_baselinemethod>tasks[i].D) { return 0; } } } return 1; } void 
printtask(FILE* myfile, int i) { int myiteratorvar; int myiteratorvar2; int myiteratorvar3; fprintf(myfile,"tasks[%d].id=%d\n", i, tasks[i].id ); fprintf(myfile,"tasks[%d].priority=%d\n", i, tasks[i].priority ); fprintf(myfile,"tasks[%d].proc=%d\n", i, tasks[i].proc ); fprintf(myfile,"tasks[%d].T=%lf ", i, tasks[i].T ); fprintf(myfile,"tasks[%d].D=%lf\n", i, tasks[i].D ); fprintf(myfile,"task[%d].nsegments=%d\n", i, tasks[i].nsegments ); for (myiteratorvar=1;myiteratorvar<=tasks[i].nsegments;myiteratorvar++) { fprintf(myfile,"tasks[%d].segments[%d].segid=%d\n", i,myiteratorvar,tasks[i].segments[myiteratorvar].segid ); fprintf(myfile,"tasks[%d].segments[%d].C=%lf\n", i,myiteratorvar,tasks[i].segments[myiteratorvar].C ); fprintf(myfile,"tasks[%d].segments[%d].pd=%lf\n", i,myiteratorvar,tasks[i].segments[myiteratorvar].pd ); fprintf(myfile,"tasks[%d].segments[%d].ncorunnersetsspecified=%d\n", i,myiteratorvar,tasks[i].segments[myiteratorvar].ncorunnersetsspecified ); for (myiteratorvar2=1;myiteratorvar2<=tasks[i].segments[myiteratorvar].ncorunnersetsspecified;myiteratorvar2++) { fprintf(myfile,"tasks[%d].segments[%d].corunnersetsspecified[%d].nsegmentsincorunnerset=%d", i,myiteratorvar,myiteratorvar2,tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset ); for (myiteratorvar3=1;myiteratorvar3<=tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset;myiteratorvar3++) { fprintf(myfile,"tasks[%d].segments[%d].corunnersetspecified[%d].segmentsincorunnerset[%d],taskindex=%hd", i,myiteratorvar,myiteratorvar2,myiteratorvar3,tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].taskindex ); fprintf(myfile,"tasks[%d].segments[%d].corunnersetspecified[%d].segmentsincorunnerset[%d].segmentindex=%hd", i,myiteratorvar,myiteratorvar2,myiteratorvar3,tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].segmentindex ); } fprintf(myfile,"tasks[%d].segments[%d].corunnersetspecified[%d].progresscorunnerset=%lf\n", i,myiteratorvar,myiteratorvar2,tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].progresscorunnerset ); } } fprintf( myfile, "\n valid = %d\n", tasks[i].valid); fprintf( myfile, "\n RUB = %lf\n", tasks[i].RUB); fprintf( myfile, "\n"); } void printtasksinternal(FILE* myfile) { int i; fprintf( myfile, "useanalysismethod = %d\n", useanalysismethod); fprintf( myfile, "nprocessors = %d\n", nprocessors); fprintf( myfile, "ntasks = %d\n", ntasks); for (i=1;i<=ntasks;i++) { printtask( myfile, i); } } void printtasks() { printtasksinternal( stdout); } void printtaskstooutputfile() { FILE* myfile; myfile = fopen( outputfilename, "w"); printtasksinternal( myfile); fclose( myfile); } void readtasksetfromfile_task(FILE* myfile, int i) { int myiteratorvar; int myiteratorvar2; int myiteratorvar3; fscanf(myfile,"%d", &(tasks[i].id) ); fscanf(myfile,"%d", &(tasks[i].priority) ); fscanf(myfile,"%d", &(tasks[i].proc) ); fscanf(myfile,"%lf", &(tasks[i].T) ); fscanf(myfile,"%lf", &(tasks[i].D) ); fscanf(myfile,"%d", &(tasks[i].nsegments) ); for (myiteratorvar=1;myiteratorvar<=tasks[i].nsegments;myiteratorvar++) { fscanf(myfile,"%d", &(tasks[i].segments[myiteratorvar].segid) ); fscanf(myfile,"%lf", &(tasks[i].segments[myiteratorvar].C) ); fscanf(myfile,"%lf", &(tasks[i].segments[myiteratorvar].pd) ); fscanf(myfile,"%d", &(tasks[i].segments[myiteratorvar].ncorunnersetsspecified) ); for 
(myiteratorvar2=1;myiteratorvar2<=tasks[i].segments[myiteratorvar].ncorunnersetsspecified;myiteratorvar2++) { fscanf(myfile,"%d", &(tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset) ); for (myiteratorvar3=1;myiteratorvar3<=tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset;myiteratorvar3++) { fscanf(myfile,"%hd", &(tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].taskindex) ); fscanf(myfile,"%hd", &(tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].segmentindex) ); } fscanf(myfile,"%lf", &(tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].progresscorunnerset) ); } } } void readtasksetfromfile_internal(char* fn) { int temp; FILE* myfile; int i; myfile = fopen( fn, "rt"); if (myfile==NULL) { printf("Error opening file in function readtasksetfromfile. fn=%s\n", fn); scanf("%d", &temp); exit(0); } fscanf(myfile, "%d", &useanalysismethod); fscanf(myfile, "%d", &nprocessors); fscanf(myfile, "%d", &ntasks); printf("ntasks = %d\n", ntasks); for (i=1;i<=ntasks;i++) { readtasksetfromfile_task(myfile,i); } fclose( myfile); } #define COPYINGBUFSIZE 1048576 char buf[COPYINGBUFSIZE]; void mycopyfile(char* fromfn,char* tofn) { int count_read; int count_written; int fdin; int fdout; fdin = open(fromfn,O_RDONLY,S_IRUSR); if (fdin==-1) { printf("Error opening infile in mycopyfile. fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } fdout = open(tofn, O_CREAT | O_WRONLY,S_IRUSR | S_IWUSR); if (fdout==-1) { printf("Error opening outfile in mycopyfile. fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } count_read = read(fdin,buf,COPYINGBUFSIZE); if (count_read==-1) { printf("Error reading infile in mycopyfile. fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } while (count_read==COPYINGBUFSIZE) { count_written=write(fdout,buf,count_read); if (count_written==-1) { printf("Error writing outfile in mycopyfile. fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } count_read = read(fdin,buf,COPYINGBUFSIZE); if (count_read==-1) { printf("Error reading infile in mycopyfile. fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } } if (count_read>0) { count_written=write(fdout,buf,count_read); if (count_written==-1) { printf("Error writing outfile in mycopyfile. 
fromfn=%s tofn=%s\n", fromfn, tofn); exit(-1); } } close( fdin); close( fdout); } void readtasksetfromfile(char* fn) { char command[500]; char fromfn[500]; char tofngz[500]; char tofn[500]; sprintf(fromfn, "%s.gz",fn); sprintf(tofngz, "/tmp/apa.gz"); sprintf(tofn, "/tmp/apa"); remove(tofngz); remove(tofn); mycopyfile(fromfn,tofngz); sprintf(command, "sync"); system(command); sprintf(command, "gunzip %s", tofngz); system(command); readtasksetfromfile_internal(tofn); } void writetasksettofile_task(FILE* myfile, int i) { int myiteratorvar; int myiteratorvar2; int myiteratorvar3; fprintf(myfile,"%d\n", tasks[i].id ); fprintf(myfile,"%d\n", tasks[i].priority ); fprintf(myfile,"%d\n", tasks[i].proc ); fprintf(myfile,"%lf ", tasks[i].T ); fprintf(myfile,"%lf\n", tasks[i].D ); fprintf(myfile,"%d\n", tasks[i].nsegments ); for (myiteratorvar=1;myiteratorvar<=tasks[i].nsegments;myiteratorvar++) { fprintf(myfile,"%d\n", tasks[i].segments[myiteratorvar].segid ); fprintf(myfile,"%lf\n", tasks[i].segments[myiteratorvar].C ); fprintf(myfile,"%lf\n", tasks[i].segments[myiteratorvar].pd ); fprintf(myfile,"%d\n", tasks[i].segments[myiteratorvar].ncorunnersetsspecified ); for (myiteratorvar2=1;myiteratorvar2<=tasks[i].segments[myiteratorvar].ncorunnersetsspecified;myiteratorvar2++) { fprintf(myfile,"%d ", tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset ); for (myiteratorvar3=1;myiteratorvar3<=tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].nsegmentsincorunnerset;myiteratorvar3++) { fprintf(myfile,"%hd ", tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].taskindex ); fprintf(myfile,"%hd ", tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].segmentsincorunnerset[myiteratorvar3].segmentindex ); } fprintf(myfile,"%lf\n", tasks[i].segments[myiteratorvar].corunnersetsspecified[myiteratorvar2].progresscorunnerset ); } } } void writetasksettofile_internal(char* fn) { int temp; FILE* myfile; int i; char tempstring[20000]; myfile = fopen( fn, "w"); if (myfile==NULL) { printf("Error opening file in function writetasksettofile. 
fn=%s\n", fn); scanf("%d", &temp); exit(0); } fprintf(myfile, "%d\n", useanalysismethod); fprintf(myfile, "%d\n", nprocessors); fprintf(myfile, "%d\n", ntasks); for (i=1;i<=ntasks;i++) { writetasksettofile_task( myfile, i); } fclose( myfile); } void writetasksettofile(char* fn) { char command[2000]; writetasksettofile_internal(fn); sprintf(command, "sync"); system(command); sprintf(command, "gzip %s", fn); system(command); } void writeutilizations(FILE* myfile) { int i; int p; for (i=1;i<=ntasks;i++) { fprintf(myfile,"task %d has utilization %lf\n", i, getutiloftask(i) ); } for (p=1;p<=nprocessors;p++) { fprintf(myfile,"processor %d has utilization %lf\n", p, getutilofprocessor(p) ); } } void writeresponsetimestofile(char* fn) { FILE* myfile; int i; int temp; myfile = fopen( fn, "w"); if (myfile==NULL) { printf("Error opening file CCC\n"); scanf("%d", &temp); exit(0); } fprintf(myfile,"%d\n", istasksetguaranteedtobeschedulable()); fprintf(myfile,"%lf\n", timerequiredforschedulabilityanalysis_newmethod); fprintf(myfile,"%d\n", istasksetguaranteedtobeschedulable_baselinemethod()); fprintf(myfile,"%lf\n", timerequiredforschedulabilityanalysis_baselinemethod); for (i=1;i<=ntasks;i++) { fprintf(myfile,"task with id=%d priority=%d proc=%d has valid=%d RUB=%lf valid_baselinemethod=%d RUBbaselinemethod=%lf\n", tasks[i].id, tasks[i].priority, tasks[i].proc, tasks[i].valid, tasks[i].RUB, tasks[i].valid_baselinemethod, tasks[i].RUB_baselinemethod); } writeutilizations(myfile); fclose( myfile); } int is_in_hepx( int i, int j) { if ((j!=i) && (tasks[j].priority >= tasks[i].priority) && (tasks[j].proc==tasks[i].proc)) return 1; else return 0; } int is_in_hep( int i, int j) { if ((tasks[j].priority >= tasks[i].priority) && (tasks[j].proc==tasks[i].proc)) return 1; else return 0; } int is_in_top( int i, int j) { if (tasks[j].proc!=tasks[i].proc) return 1; else return 0; } int getnheptasks( int iprime) { int nheptasks; int iprimeprime; nheptasks = 0; for (iprimeprime=1;iprimeprime<=ntasks;iprimeprime++) { if (is_in_hep( iprime, iprimeprime)) { nheptasks = nheptasks + 1; } } return nheptasks; } int isinTS(int i,int iprime) { if (tasks[iprime].proc == tasks[i].proc) { if (is_in_hep( i, iprime)) { return 1; } else { return 0; } } else { return 1; } } void setup_number_of_segments_on_processor() { int pprime; int iprime; int kprime; int localpos; for (pprime=1;pprime<=nprocessors;pprime++) { number_of_segments_on_processor[pprime-1] = 0; for (iprime=1;iprime<=ntasks;iprime++) { if (tasks[iprime].proc==pprime) { number_of_segments_on_processor[pprime-1] = number_of_segments_on_processor[pprime-1] + tasks[iprime].nsegments; } } } for (pprime=1;pprime<=nprocessors;pprime++) { localpos = 1; for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { if (tasks[iprime].proc==pprime) { tasks[ iprime ].segments[ kprime ].pos = localpos; localpos = localpos + 1; } } } } } // i is the task we are analyzing void fill_segmentsets_internal(int i, int p) { int pprime; int allempty; int temp; int iprime; int kprime; if (p>nprocessors) { allempty = 1; for (pprime=1;pprime<=nprocessors;pprime++) { allempty = allempty && (template_localprocessor_taskindices[pprime-1]==-1) && (template_localprocessor_segmentindices[pprime-1]==-1); } if (!allempty) { for (pprime=1;pprime<=nprocessors;pprime++) { segmentsets[nsegmentsets].taskindices[pprime-1] = template_localprocessor_taskindices[pprime-1]; segmentsets[nsegmentsets].segmentindices[pprime-1] = template_localprocessor_segmentindices[pprime-1]; 
} nsegmentsets = nsegmentsets + 1; if (nsegmentsets>MAXNSEGMENTSETS) { printf("In fill_segmentsets_internal: out of memory.\n"); exit(0); } } } else { if (tasks[i].proc!=p) { template_localprocessor_taskindices[ p-1] = -1; template_localprocessor_segmentindices[ p-1] = -1; fill_segmentsets_internal( i, p+1); } for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { if ( (tasks[iprime].proc ==p) && (isinTS(i,iprime)) ) { if (tasks[iprime].proc == tasks[i].proc) { if (is_in_hep( i, iprime)) { template_localprocessor_taskindices[ p-1] = iprime; template_localprocessor_segmentindices[ p-1] = kprime; fill_segmentsets_internal( i, p+1); } } else { template_localprocessor_taskindices[ p-1] = iprime; template_localprocessor_segmentindices[ p-1] = kprime; fill_segmentsets_internal( i, p+1); } } } } } } // i is the task we are analyzing void fill_segmentsets(int i) { nsegmentsets = 0; fill_segmentsets_internal(i, 1); } void form_template(int iprime, int kprime, int corindex) { int pprime; int corsegindex; int iprimeprime; int kprimeprime; for (pprime=1;pprime<=nprocessors;pprime++) { template_localprocessor_taskindices[ pprime-1] = -1; template_localprocessor_segmentindices[ pprime-1] = -1; } template_localprocessor_taskindices[ tasks[iprime].proc-1] = iprime; template_localprocessor_segmentindices[ tasks[iprime].proc-1] = kprime; for (corsegindex=1;corsegindex<=tasks[iprime].segments[kprime].corunnersetsspecified[corindex].nsegmentsincorunnerset;corsegindex++) { iprimeprime = tasks[iprime].segments[kprime].corunnersetsspecified[corindex].segmentsincorunnerset[corsegindex].taskindex; kprimeprime = tasks[iprime].segments[kprime].corunnersetsspecified[corindex].segmentsincorunnerset[corsegindex].segmentindex; template_localprocessor_taskindices[ tasks[iprimeprime].proc-1] = iprimeprime; template_localprocessor_segmentindices[ tasks[iprimeprime].proc-1] = kprimeprime; } } int compute_hash_based_on_template() { int sum; int pprime; int scalefactor; int localpos; sum = 0; scalefactor = 1; for (pprime=1;pprime<=nprocessors;pprime++) { if ((template_localprocessor_taskindices[ pprime-1]==-1) && (template_localprocessor_segmentindices[ pprime-1]==-1)) { localpos = 0; } else { localpos = tasks[ template_localprocessor_taskindices[ pprime-1] ].segments[ template_localprocessor_segmentindices[ pprime-1] ].pos; } sum = sum + localpos*scalefactor; scalefactor = scalefactor * (1+number_of_segments_on_processor[pprime-1]); } return (sum % MAPSIZE); } int template_localprocessorindices_matches_segmentset(int segmentsetindex) { int match; int pprime; match = 1; for (pprime=1;pprime<=nprocessors;pprime++) { match = match && (template_localprocessor_taskindices[pprime-1] == segmentsets[segmentsetindex].taskindices[pprime-1] ) && (template_localprocessor_segmentindices[pprime-1] == segmentsets[segmentsetindex].segmentindices[pprime-1] ); } return match; } double getpw(int iprime,int kprime, int segmentsetindex) { int pprime; int bucketindex; int match; int iterator; int corindex; match = 1; for (pprime=1;pprime<=nprocessors;pprime++) { if (pprime!=tasks[iprime].proc) { match = match && (segmentsets[segmentsetindex].taskindices[pprime-1]==-1) && (segmentsets[segmentsetindex].segmentindices[pprime-1]==-1); } } if (match) { return 1.0; } for (pprime=1;pprime<=nprocessors;pprime++) { template_localprocessor_taskindices[pprime-1] = segmentsets[segmentsetindex].taskindices[pprime-1]; template_localprocessor_segmentindices[pprime-1] = 
segmentsets[segmentsetindex].segmentindices[pprime-1]; } bucketindex = compute_hash_based_on_template(); for (iterator=0;iteratorMAXNSEGMENTSETREFS ) { printf("In fill_segmentsets_that_includes_segment: out of memory.\n"); exit(0); } } } } void setup_map(int i) { int iprime; int kprime; int corindex; int bucketindex; for (bucketindex=0;bucketindex=BUCKETSIZE) { printf("In setup_map: out of memory.\n"); exit(0); } mapitems[bucketindex].nelements_in_bucket = mapitems[bucketindex].nelements_in_bucket + 1; } } } } } double getworstpwforthissegment(int j, int k) { int index_lowest_so_far; double value_lowest_so_far; int segmentsetindex; double v; index_lowest_so_far = -1; for (segmentsetindex=0;segmentsetindex0); } if ((getnheptasks(iprime)==1) && (allnonzero)) { LST = 0; for (kprimeprime=1;kprimeprime<=kprime-1;kprimeprime++) { LST = LST + tasks[iprime].segments[kprimeprime].C / getworstpwforthissegment(iprime, kprimeprime); } consideredinterval = t+mindouble2(tasks[iprime].D,LST); nwholejobs = ceil( consideredinterval / tasks[iprime].T); sum = nwholejobs*tasks[iprime].segments[kprime].C; return sum; } else { consideredinterval = t + tasks[iprime].D; nwholejobs = ceil( consideredinterval / tasks[iprime].T); sum = nwholejobs*tasks[iprime].segments[kprime].C; return sum; } } void fillobj_for_objective_function() { int temp; nelements_of_obj = nsegmentsets; if (nelements_of_obj>NSUBSETS) { printf("In fillobj_for_objective_function. Not enough memory\n"); exit(-1); } for (temp=0;tempNSUBSETS) { printf("In fillindandval_for_constraintsexec. Not enough memory\n"); exit(-1); } } } double gurobi_run_time; // this optimization problem is always feasible so we do not need to return feasible/infeasible. // but we return it anyway, for historical reasons. It should return feas=true // this function is req_lp in the paper "Schedulability Analysis of Tasks with Co-Runner Dependent Execution Times" void req_basic( int i, double t, int* feas, double* value) { int iprime; int kprime; double rhs; char miplogfn[20000]; char mipfn[20000]; char mipfnlp[20000]; counterforILPsolver = counterforILPsolver + 1; sprintf(miplogfn,"mip%d.log", counterforILPsolver); sprintf(mipfn,"mip%d", counterforILPsolver); sprintf(mipfnlp,"mip%d.lp", counterforILPsolver); // dothememoryallocation(); // if (enablelogging) { error = GRBloadenv( &env, miplogfn); // } else { // error = GRBloadenv( &env, NULL); // } if (error || env == NULL) { fprintf(stderr, "Error: could not create environment. 
errorcode = %d.\n", error); exit(0); } // error = GRBsetintparam(env, GRB_INT_PAR_METHOD, 1); // method = 1 means dual simplex // if (error) goto QUIT; error = GRBnewmodel( env, &model, mipfn, 0, NULL, NULL, NULL, NULL, NULL ); if (error) goto QUIT; fillobj_for_objective_function(i); error = GRBaddvars( model, nelements_of_obj, 0, NULL, NULL, NULL, obj, NULL, NULL, vtype, NULL); if (error) goto QUIT; error = GRBsetintattr( model, GRB_INT_ATTR_MODELSENSE, GRB_MAXIMIZE); if (error) goto QUIT; error = GRBupdatemodel( model); if (error) goto QUIT; for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { if (is_in_hep(i,iprime)) { rhs = ceil( t/tasks[iprime].T) * tasks[iprime].segments[kprime].C; fillindandval_for_constraintsexec(iprime, kprime); error = GRBaddconstr( model, nelements_of_constr, ind, val, GRB_LESS_EQUAL, rhs, NULL ); if (error) goto QUIT; } } } for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { if (is_in_top(i,iprime)) { rhs = xUB(iprime, kprime, t); fillindandval_for_constraintsexec(iprime, kprime); error = GRBaddconstr( model, nelements_of_constr, ind, val, GRB_LESS_EQUAL, rhs, NULL ); if (error) goto QUIT; } } } error = GRBupdatemodel( model); if (error) goto QUIT; error = GRBoptimize(model); if (error) goto QUIT; error = GRBupdatemodel( model); if (error) goto QUIT; if (enablelogging) { error = GRBwrite( model, mipfnlp); if (error) goto QUIT; } error = GRBgetintattr( model, GRB_INT_ATTR_STATUS, &optimstatus); if (error) goto QUIT; error = GRBgetdblattr( model, GRB_DBL_ATTR_OBJVAL, &objval); if (error) goto QUIT; error = GRBgetdblattrarray( model, GRB_DBL_ATTR_X, 0, nelements_of_obj, sol); if (error) goto QUIT; error = GRBgetdblattr( model, GRB_DBL_ATTR_RUNTIME, &gurobi_run_time); if (error) goto QUIT; *feas = 0; printf("\nOptimization complete\n"); if (optimstatus == GRB_OPTIMAL) { printf("Optimal objective: %.4e\n", objval); *feas = 1; *value = objval; } else { printf("Did not get an objective.\n"); } QUIT: if (error) { printf("ERROR: %s\n", GRBgeterrormsg(env) ); exit(1); } GRBfreemodel( model); GRBfreeenv( env); // freethememory(); } double compute_lb_of_worstcase_response_time(int i) { double temp; int k; temp = 0.0; for (k=1;k<=tasks[i].nsegments;k++) { temp = temp + tasks[i].segments[k].C; } return temp; } #if COMPILEMSVS unsigned __int128 diff_us_timeval(struct timeval *x, struct timeval *y) { unsigned __int128 amillion = 1000000; unsigned __int128 u = ((x->tv_sec * amillion) + x->tv_usec); unsigned __int128 v = ((y->tv_sec * amillion) + y->tv_usec); return u-v; } double diff_s_timeval(struct timeval *x, struct timeval *y) { unsigned __int128 amillion = 1000000; unsigned __int128 t; unsigned __int128 tsec; unsigned __int128 tusec; double t_as_double_secondpart; double t_as_double_usecondpart; double t_as_double; t = diff_us_timeval( x, y); tsec = t / amillion; tusec = t % amillion; t_as_double_secondpart = tsec; t_as_double_usecondpart = tusec; t_as_double_usecondpart = t_as_double_usecondpart / amillion; t_as_double = t_as_double_secondpart + t_as_double_usecondpart; return t_as_double; } #else unsigned long long diff_ns_timespec(struct timespec *x, struct timespec *y) { unsigned long long abillion = 1000000000; unsigned long long u; u = x->tv_sec; u = u * abillion; u = u + x->tv_nsec; unsigned long long v; v = y->tv_sec; v = v * abillion; v = v + y->tv_nsec; return u-v; } double diff_s_timespec (struct timespec *x, struct timespec *y) { unsigned long long abillion = 
1000000000; unsigned long long t; unsigned long long tsec; unsigned long long tnsec; double t_as_double_secondpart; double t_as_double_usecondpart; double t_as_double; t = diff_ns_timespec( x, y); tsec = t / abillion; tnsec = t % abillion; t_as_double_secondpart = tsec; t_as_double_usecondpart = tnsec; t_as_double_usecondpart = t_as_double_usecondpart / abillion; t_as_double = t_as_double_secondpart + t_as_double_usecondpart; return t_as_double; } #endif #if COMPILEMSVS #define CLOCK_REALTIME 0 LARGE_INTEGER getFILETIMEoffset() { SYSTEMTIME s; FILETIME f; LARGE_INTEGER t; s.wYear = 1970; s.wMonth = 1; s.wDay = 1; s.wHour = 0; s.wMinute = 0; s.wSecond = 0; s.wMilliseconds = 0; SystemTimeToFileTime(&s, &f); t.QuadPart = f.dwHighDateTime; t.QuadPart <<= 32; t.QuadPart |= f.dwLowDateTime; return (t); } int my_clock_gettime(int X, struct timeval *tv) { LARGE_INTEGER t; FILETIME f; double microseconds; static LARGE_INTEGER offset; static double frequencyToMicroseconds; static int initialized = 0; static BOOL usePerformanceCounter = 0; if (!initialized) { LARGE_INTEGER performanceFrequency; initialized = 1; usePerformanceCounter = QueryPerformanceFrequency(&performanceFrequency); if (usePerformanceCounter) { QueryPerformanceCounter(&offset); frequencyToMicroseconds = (double)performanceFrequency.QuadPart / 1000000.; } else { offset = getFILETIMEoffset(); frequencyToMicroseconds = 10.; } } if (usePerformanceCounter) { QueryPerformanceCounter(&t); } else { GetSystemTimeAsFileTime(&f); t.QuadPart = f.dwHighDateTime; t.QuadPart <<= 32; t.QuadPart |= f.dwLowDateTime; } t.QuadPart -= offset.QuadPart; microseconds = (double)t.QuadPart / frequencyToMicroseconds; t.QuadPart = microseconds; tv->tv_sec = t.QuadPart / 1000000; tv->tv_usec = t.QuadPart % 1000000; return (0); } #endif void printmapitems(int fromindex, int toindex, int np) { int temp; int temp2; for (temp=fromindex;temp<=toindex;temp++) { printf("mapitems[%d].nelements_in_bucket = %d\n", temp, mapitems[temp].nelements_in_bucket ); for (temp2=0;temp2tasks[i].D); } if ((convg) && (!dmiss)) { tasks[i].valid = 1; tasks[i].RUB = newt; } } allvalid = 1; for (i=1;i<=ntasks;i++) { allvalid = allvalid && (tasks[i].valid); } #if COMPILEMSVS status_clock_gettime_end = my_clock_gettime(CLOCK_REALTIME, &myend); #else status_clock_gettime_end = clock_gettime(CLOCK_MONOTONIC_RAW, &myend); #endif #if COMPILEMSVS timerequiredforschedulabilityanalysis_newmethod = diff_s_timeval(&myend, &mybegin); #else timerequiredforschedulabilityanalysis_newmethod = diff_s_timespec(&myend, &mybegin); #endif foutclockinfo = fopen( "clock_info_reqlpmethod.txt", "w"); fprintf( foutclockinfo, "%lf %ld %ld %ld %ld %d %d %lf", timerequiredforschedulabilityanalysis_newmethod, mybegin.tv_sec, mybegin.tv_nsec, myend.tv_sec, myend.tv_nsec, status_clock_gettime_begin, status_clock_gettime_end, gurobi_total_run_time); fclose(foutclockinfo); if (allvalid) { printf("Schedulable\n"); return 1; } else { printf("Don't know\n"); return 0; } } double getbaselineexecutiontimeoftask( int j) { int k; double sum; double term; double worstpwforthissegment; int allnonzero; allnonzero = 1; for (k=1;k<=tasks[j].nsegments;k++) { allnonzero = allnonzero && (getworstpwforthissegment(j, k)>0); } if (!allnonzero) { printf("Error in getbaselineexecutiontimeoftask. There is a segment that can have zero progress. This is not necessarily a bad system. 
But it is a system for which this operation cannot be performed.\n"); exit(-1); } sum = 0.0; for (k=1;k<=tasks[j].nsegments;k++) { worstpwforthissegment = getworstpwforthissegment(j, k); term = tasks[j].segments[k].C / worstpwforthissegment; sum = sum + term; } return sum; } void req_basic_baselinemethod( int i, double t, int* feas, double* value ) { int j; double sum; double term; sum = getbaselineexecutiontimeoftask( i); for (j=1;j<=ntasks;j++) { if (is_in_hepx( i, j)) { term = ceil(t/tasks[j].T) * getbaselineexecutiontimeoftask( j); sum = sum + term; } } *feas = 1; *value = sum; } int determineschedulabilityandcomputeresponsetimes_with_baselinemethod() { int i; double t; double newt; int feas; int convg; int dmiss; int allvalid; FILE* foutclockinfo; int iprime; int kprime; #if COMPILEMSVS struct timeval mybegin; struct timeval myend; #else struct timespec mybegin; struct timespec myend; #endif int status_clock_gettime_begin; int status_clock_gettime_end; #if COMPILEMSVS status_clock_gettime_begin = my_clock_gettime(CLOCK_REALTIME, &mybegin); #else status_clock_gettime_begin = clock_gettime(CLOCK_MONOTONIC_RAW, &mybegin); #endif dmiss = 0; // added in June 2016. for (i=1;i<=ntasks;i++) { tasks[i].valid_baselinemethod = 0; } for (i=1;((i<=ntasks) && (!dmiss));i++) { // modified in June 2016. do_setup_data_structures( i); convg = 0; dmiss = 0; newt = getbaselineexecutiontimeoftask( i); // compute_lb_of_worstcase_response_time(i); while ((!convg) && (!dmiss)) { t = newt; req_basic_baselinemethod( i, t, &feas, &newt); convg = (newt<=t); dmiss = (newt>tasks[i].D); } if ((convg) && (!dmiss)) { tasks[i].valid_baselinemethod = 1; tasks[i].RUB_baselinemethod = newt; } } allvalid = 1; for (i=1;i<=ntasks;i++) { allvalid = allvalid && (tasks[i].valid_baselinemethod); } #if COMPILEMSVS status_clock_gettime_end = clock_gettime(CLOCK_REALTIME, &myend); #else status_clock_gettime_end = clock_gettime(CLOCK_MONOTONIC_RAW, &myend); #endif #if COMPILEMSVS timerequiredforschedulabilityanalysis_baselinemethod = diff_s_timeval(&myend, &mybegin); #else timerequiredforschedulabilityanalysis_baselinemethod = diff_s_timespec(&myend, &mybegin); #endif foutclockinfo = fopen( "clock_info_baselinemethod.txt", "w"); fprintf( foutclockinfo, "%lf %ld %ld %ld %ld %d %d", timerequiredforschedulabilityanalysis_newmethod, mybegin.tv_sec, mybegin.tv_nsec, myend.tv_sec, myend.tv_nsec, status_clock_gettime_begin, status_clock_gettime_end); fclose(foutclockinfo); if (allvalid) { printf("Schedulable. baselinemethod\n"); return 1; } else { printf("Don't know. baselinemethod\n"); return 0; } } int determineschedulabilityandcomputeresponsetimes() { int flag; system("sync"); // we do this to ensure that data is written back to disk. 
We do not want such writeback to happen when we run our schedulability analysis because this would interfer // with timing measurements // sleep(1); // we do this in order to let the processor cool down if (useanalysismethod==1) { flag = determineschedulabilityandcomputeresponsetimes_with_reqlpmethod(); } // sleep(1); // we do this in order to let the processor cool down determineschedulabilityandcomputeresponsetimes_with_baselinemethod(); return flag; } void parsearguments( int argc, char *argv[], char *envp[]) { int i; i = 1; while (i<=argc-1) { if (strcmp( argv[i], "-i")==0) { strcpy( inputfilename, argv[i+1] ); i = i + 2; } else { if (strcmp( argv[i], "-o")==0) { strcpy( outputfilename, argv[i+1] ); i = i + 2; } else { if (strcmp( argv[i], "-g")==0) { // do in batchmode: generate tasksets generate_experiments = 1; i = i + 1; } else { if (strcmp( argv[i], "-r")==0) { // do in batchmode: check schedulability of the generated tasksets run_experiments = 1; i = i + 1; } else { if (strcmp( argv[i], "-s")==0) { // do in batchmode: gather statistics generate_statistics = 1; i = i + 1; } else { if (strcmp( argv[i], "-m")==0) { // do many experiments in batchmode metathing = 1; i = i + 1; } else { if (strcmp( argv[i], "-p")==0) { // produce tasksets from parts producetasksetsfromparts = 1; strcpy( producetasksetsfrompartsfilename, argv[i+1] ); i = i + 2; } else { i++; } } } } } } } } } int my_int_random(int lb, int ub) { int t; int t2; int t3; t = myrandom(); t2 = t % (ub-lb+1); t3 = lb + t2; return t3; } double my_double_random(double lo, double hi) { double t; double t2; double t3; t = mydrand48(); t2 = t*(hi-lo); t3 = lo + t2; return t3; } int getntasksbasedonvector(int* ntasks_on_proc) { int p; int actualntasks; actualntasks = 0; for (p=1;p<=nprocessors;p++) { actualntasks = actualntasks + ntasks_on_proc[p]; } return actualntasks; } // void generate_tasks_randomly(double targetutil) void generate_tasks_randomly() { int i; int k; double targetutilfortask; double targetutilforsegment; double loboundC; double hiboundC; targetutilfortask = (1.0*nprocessors)/ntasks; for (i=1;i<=ntasks;i++) { tasks[i].id = i; tasks[i].priority = ntasks - i; tasks[i].proc = -1; tasks[i].T = 1.0; tasks[i].D = 1.0; tasks[i].nsegments = nsg; targetutilforsegment = targetutilfortask / tasks[i].nsegments; loboundC = 0.001; hiboundC = 2*targetutilforsegment - loboundC; for (k=1;k<=tasks[i].nsegments;k++) { // tasks[i].segments[k].segid = 1; tasks[i].segments[k].segid = k; // On July 17, 2020, I found that the line above (assigning segid = 1) is incorrect; it should be segid=k. // So, I corrected it. // Fortunately, this error does not impact any computed result in terms of determining scheedulability. // Its impact is that when the generated tasksets are written to file, there all segments of a single // tasks will be written as if they have the same index. But once again, segid is not used in schedulability // analysis to the schedulability analysis is correct anyway. 
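// Each segment draws C from [loboundC, hiboundC] via my_double_random; since
// hiboundC = 2*targetutilforsegment - loboundC and T is 1.0 at this point, the expected
// utilization of a segment is targetutilforsegment, and hence the expected utilization of a
// task is targetutilfortask = nprocessors/ntasks.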
tasks[i].segments[k].C = my_double_random(loboundC,hiboundC); tasks[i].segments[k].pd = 1.0; tasks[i].segments[k].ncorunnersetsspecified = 0; } } } double mypow( double t1, double t2) { double temp; double temp2; temp = t2*log(t1); temp2 = exp( temp); return temp2; } void changeToftasks() { int i; int s; double t0; double t1; for (i=1;i<=ntasks;i++) { t0 = my_double_random(0.0,4.0); t1 = mypow( 10.0, 2+t0); tasks[i].D = t1 * tasks[i].D; tasks[i].T = t1 * tasks[i].T; for (s=1;s<=tasks[i].nsegments;s++) { tasks[i].segments[s].C = tasks[i].segments[s].C * t1; } } } void setTDsothatDTmaybedifferent() { int i; double t1; for (i=1;i<=ntasks;i++) { t1 = 1.0; tasks[i].D = t1 * tasks[i].T; } } double getsumC(int i) { double sumC; int s; sumC = 0.0; for (s=1;s<=tasks[i].nsegments;s++) { sumC = sumC + tasks[i].segments[s].C; } return sumC; } void assignTDCoftasks() { generate_tasks_randomly(); changeToftasks(); setTDsothatDTmaybedifferent(); } int gettaskwithhighestutilamongtasksthatarenotassigned() { int i; int taskwithhighestutil; taskwithhighestutil = -1; for (i=1;i<=ntasks;i++) { if (tasks[i].proc==-1) { if (taskwithhighestutil!=-1) { if (getutiloftask(taskwithhighestutil)t) && (newt<=tasks[i].D)) { t = newt; newt = 0.0; for (j=1;j<=ntasks;j++) { if (is_in_hep( i, j)) { newt = newt + ceil(t/tasks[j].T) * getsumC(j); } } } if (newt>tasks[i].D) { return 0; } } return 1; } void scale_taskset_by_factor( double factor) { int i; int s; for (i=1;i<=ntasks;i++) { for (s=1;s<=tasks[i].nsegments;s++) { tasks[i].segments[s].C = tasks[i].segments[s].C * factor; } } } void scale_taskset_so_that_it_is_critically_schedulable_without_co_runner_interference() { int flag; double factor; double tolerance; factor = 2.0; flag = check_sched_without_co_runners(); while (flag) { scale_taskset_by_factor( factor); flag = check_sched_without_co_runners(); } while (!flag) { scale_taskset_by_factor( 1.0/factor); flag = check_sched_without_co_runners(); } // now we have a taskset that is schedulable but multiplying execution times by factor makes it unschedulable. 
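// The loop below maintains that invariant while shrinking factor: each iteration scales the
// execution times up by sqrt(factor) and undoes the scaling if the taskset becomes
// unschedulable; either way factor is replaced by sqrt(factor). When factor reaches
// tolerance (1.01) or less, the taskset is schedulable but scaling it by factor (at most
// about 1%) makes it unschedulable, i.e., it is critically schedulable to within about 1%.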
tolerance = 1.01; while (factor>tolerance) { scale_taskset_by_factor( sqrt(factor)); flag = check_sched_without_co_runners(); if (!flag) { scale_taskset_by_factor( 1.0/sqrt(factor)); } factor = sqrt(factor); } } int is_segmentset_equal_to(int iprime, int kprime, int segmentsetindex) { return ( (segmentsets[segmentsetindex].taskindices[tasks[iprime].proc-1] == iprime) && (segmentsets[segmentsetindex].segmentindices[tasks[iprime].proc-1] == kprime) ); } int is_segmentset_empty(int pprime, int segmentsetindex) { return ( (segmentsets[segmentsetindex].taskindices[pprime-1] == -1) && (segmentsets[segmentsetindex].segmentindices[pprime-1] == -1) ); } void assigninitialcorunnerdependenciesforgivensegmentforagivencurrentset(int iprime, int kprime, int corindex, double r, int segmentsetindex) { double t1; int counter; int iprimeprime; int kprimeprime; t1 = my_double_random( r, 1.00); tasks[iprime].segments[kprime].corunnersetsspecified[ corindex ].progresscorunnerset = t1; tasks[iprime].segments[kprime].corunnersetsspecified[ corindex ].nsegmentsincorunnerset = 0; // this lines is actually not needed counter = 0; for (iprimeprime=1;iprimeprime<=ntasks;iprimeprime++) { for (kprimeprime=1;kprimeprime<=tasks[iprimeprime].nsegments;kprimeprime++) { if ( (tasks[iprimeprime].proc != tasks[iprime].proc) && (is_segmentset_equal_to(iprimeprime,kprimeprime,segmentsetindex))) { counter = counter + 1; tasks[iprime].segments[kprime].corunnersetsspecified[ corindex ].segmentsincorunnerset[counter].taskindex = iprimeprime; tasks[iprime].segments[kprime].corunnersetsspecified[ corindex ].segmentsincorunnerset[counter].segmentindex = kprimeprime; } } } tasks[iprime].segments[kprime].corunnersetsspecified[ corindex ].nsegmentsincorunnerset = counter; } int segmentset_includes_this_segment_and_at_least_one_more(int iprime, int kprime,int segmentsetindex) { int pprime; int found_self; int found_empty; int found_other; found_self = 0; found_other = 0; for (pprime=1;pprime<=nprocessors;pprime++) { if (pprime==tasks[iprime].proc) { if (is_segmentset_equal_to(iprime,kprime,segmentsetindex)) { found_self = 1; } } else { if (!is_segmentset_empty(pprime, segmentsetindex)) { found_other = 1; } } } return (found_self && found_other); } void assigninitialcorunnerdependenciesforgivensegment(int iprime, int kprime, double r) { int segmentsetindex; tasks[iprime].segments[kprime].ncorunnersetsspecified = 0; for (segmentsetindex=0;segmentsetindex MAXNCORUNNERSETSSPECIFIED) { printf("Error in assigninitialcorunnerdependenciesforgivensegment. 
Not enough memory.\n"); fflush(stdout); exit(-1); } assigninitialcorunnerdependenciesforgivensegmentforagivencurrentset( iprime, kprime, tasks[iprime].segments[kprime].ncorunnersetsspecified, r, segmentsetindex); } } } void assigninitialcorunnerdependencies(double r) { int iprime; int kprime; for (iprime=1;iprime<=ntasks;iprime++) { do_setup_data_structures(iprime); for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { assigninitialcorunnerdependenciesforgivensegment( iprime, kprime, r); } } } // dominates menas that there is something that cri1 has but cri2 does not int dominatessegmentsubset(int i, int s, int cri1, int cri2) { int t1; int t2; int found; for (t1=1;t1<=tasks[i].segments[s].corunnersetsspecified[ cri1 ].nsegmentsincorunnerset;t1++) { found = 0; for (t2=1;t2<=tasks[i].segments[s].corunnersetsspecified[ cri2 ].nsegmentsincorunnerset;t2++) { if ( (tasks[i].segments[s].corunnersetsspecified[ cri1 ].segmentsincorunnerset[t1].taskindex == tasks[i].segments[s].corunnersetsspecified[ cri2 ].segmentsincorunnerset[t2].taskindex) && (tasks[i].segments[s].corunnersetsspecified[ cri1 ].segmentsincorunnerset[t1].segmentindex == tasks[i].segments[s].corunnersetsspecified[ cri2 ].segmentsincorunnerset[t2].segmentindex) ) { found = 1; } } if (!found) { return 0; } } return 1; } void detectinversion(int i, int s, int* pcri1, int* pcri2) { int found; int cri1; int cri2; *pcri1 = -1; *pcri2 = -1; found = 0; for (cri1=1;cri1<=tasks[i].segments[s].ncorunnersetsspecified;cri1++) { for (cri2=cri1+1;cri2<=tasks[i].segments[s].ncorunnersetsspecified;cri2++) { if (!found) { if (dominatessegmentsubset(i,s, cri1, cri2)) { if (tasks[i].segments[s].corunnersetsspecified[ cri1 ].progresscorunnerset < tasks[i].segments[s].corunnersetsspecified[ cri2 ].progresscorunnerset ) { *pcri1 = cri1; *pcri2 = cri2; found = 1; } } } } } } void recomputepdforaspecificsegment(int i, int s, double r) { tasks[i].segments[s].pd = r; } void recomputepd(double r) { int i; int s; for (i=1;i<=ntasks;i++) { for (s=1;s<=tasks[i].nsegments;s++) { recomputepdforaspecificsegment(i,s,r); } } } void swap_corunnersetsspecified(int i,int s,int index_i,int index_j) { int temp; int l; struct segmentincorunnerset q; l = maxint2( tasks[i].segments[s].corunnersetsspecified[index_i].nsegmentsincorunnerset, tasks[i].segments[s].corunnersetsspecified[index_j].nsegmentsincorunnerset); temp = tasks[i].segments[s].corunnersetsspecified[index_i].nsegmentsincorunnerset; tasks[i].segments[s].corunnersetsspecified[index_i].nsegmentsincorunnerset = tasks[i].segments[s].corunnersetsspecified[index_j].nsegmentsincorunnerset; tasks[i].segments[s].corunnersetsspecified[index_j].nsegmentsincorunnerset = temp; for (temp=1;temp<=l;temp++) { q = tasks[i].segments[s].corunnersetsspecified[index_i].segmentsincorunnerset[temp]; tasks[i].segments[s].corunnersetsspecified[index_i].segmentsincorunnerset[temp] = tasks[i].segments[s].corunnersetsspecified[index_j].segmentsincorunnerset[temp]; tasks[i].segments[s].corunnersetsspecified[index_j].segmentsincorunnerset[temp] = q; } } void rearrangecorunnerdependencies_size(int i,int s,int lo,int hi) { int index_i; int index_j; int go_on; int p; int pivot; struct corunnersetspecifiedstruct* cortemp; if (lo pivot); do { index_i = index_i + 1; } while ((tasks[i].segments[s].corunnersetsspecified[index_i].nsegmentsincorunnerset) < pivot); if (index_i < index_j) { swap_corunnersetsspecified(i,s,index_i,index_j); } else { go_on = 0; p = index_j; } } rearrangecorunnerdependencies_size(i,s,lo,p); 
void rearrangecorunnerdependencies_progresscorunnerset(int i,int s,int lo,int hi) { int index_i; int index_j; int go_on; int p; double pivot; double progresstemp; if (lo<hi) { /* Hoare-partition quicksort on the progresscorunnerset values only (descending); first element used as pivot (an assumption) */ pivot = tasks[i].segments[s].corunnersetsspecified[lo].progresscorunnerset; index_i = lo - 1; index_j = hi + 1; go_on = 1; while (go_on) { do { index_j = index_j - 1; } while (tasks[i].segments[s].corunnersetsspecified[index_j].progresscorunnerset < pivot); do { index_i = index_i + 1; } while (tasks[i].segments[s].corunnersetsspecified[index_i].progresscorunnerset > pivot); if (index_i < index_j) { progresstemp = tasks[i].segments[s].corunnersetsspecified[index_i].progresscorunnerset; tasks[i].segments[s].corunnersetsspecified[index_i].progresscorunnerset = tasks[i].segments[s].corunnersetsspecified[index_j].progresscorunnerset; tasks[i].segments[s].corunnersetsspecified[index_j].progresscorunnerset = progresstemp; } else { go_on = 0; p = index_j; } } rearrangecorunnerdependencies_progresscorunnerset(i,s,lo,p); rearrangecorunnerdependencies_progresscorunnerset(i,s,p+1,hi); } }
// we rearrange the co-runner sets (first by size, then by progress factor) so as to avoid conflicts; this makes the code run faster -- see below.
void rearrangecorunnerdependencies() { int i; int s; for (i=1;i<=ntasks;i++) { for (s=1;s<=tasks[i].nsegments;s++) { rearrangecorunnerdependencies_size( i,s,1,tasks[i].segments[s].ncorunnersetsspecified); } } for (i=1;i<=ntasks;i++) { for (s=1;s<=tasks[i].nsegments;s++) { rearrangecorunnerdependencies_progresscorunnerset( i,s,1,tasks[i].segments[s].ncorunnersetsspecified); } } }
void sorttasksinDMorderandassignpriorities() { int i; int j; int ntasks_with_shorter_deadline; for (i=1;i<=ntasks;i++) { ntasks_with_shorter_deadline = 0; for (j=1;j<=ntasks;j++) { if (j!=i) { if (tasks[j].D < tasks[i].D) { ntasks_with_shorter_deadline = ntasks_with_shorter_deadline + 1; } } } tasks[i].priority = ntasks - ntasks_with_shorter_deadline; } }
// metaindex 1..400 encodes two grid indices: index1 = ((metaindex-1)/20)+1 and index2 = ((metaindex-1)%20)+1, giving mult = 0.05*index1 and r (PROGMIN) = 0.05*index2; for example, metaindex 43 gives index1=3 and index2=3, i.e. mult=0.15 and r=0.15.
void getindices_from_metaindex(int loc_metaindex, int* p_index1, int* p_index2) { *p_index1 = (((loc_metaindex-1) / 20)+1); *p_index2 = (((loc_metaindex-1) % 20)+1); }
void get_mul_and_r_from_metaindex(int loc_metaindex, double* p_mult, double* p_r) { int index1; int index2; getindices_from_metaindex( loc_metaindex, &index1, &index2); *p_mult = index1 * 0.05; *p_r = index2 * 0.05; if ((loc_metaindex>=401) || (loc_metaindex<=0)) { printf("Error in get_mul_and_r_from_metaindex\n"); exit(0); } }
int should_this_metaindex_be_included(int metaindex) { int index1; int index2; getindices_from_metaindex( metaindex, &index1, &index2); return (index1>index2); }
void write_mult_and_PROGMIN_to_file_based_on_metaindex(char* fn, int loc_metaindex) { double loc_mult; double loc_PROGMIN; FILE* fout; get_mul_and_r_from_metaindex(loc_metaindex, &loc_mult, &loc_PROGMIN); fout = fopen(fn, "w"); fprintf(fout,"mult = %lf PROGMIN=%lf\n", loc_mult, loc_PROGMIN); fclose( fout); }
void do_generate_experiments(int metaindex) { char tempstr[20000]; int iterator; double mult; char tempstring2[20000]; int error1; int error2; double r; for (iterator=1;iterator<=MAX_ITERATOR_IN_BATCH;iterator++) { mymakedirectory("experiments"); mychdir("experiments"); mymakedirectory("experiment1"); mychdir("experiment1"); sprintf(tempstr,"taskset%d",iterator); error1 = mymakedirectory(tempstr); sprintf(tempstr,"taskset%d",iterator); error2 = mychdir(tempstr); useanalysismethod = 1; if (metathing==0) { nprocessors = 4; } get_mul_and_r_from_metaindex( metaindex, &mult, &r); if (metathing==0) { ntasks = 10; } assignTDCoftasks(); sorttasksinDMorderandassignpriorities(); assigntaskstoprocessors(); scale_taskset_so_that_it_is_critically_schedulable_without_co_runner_interference(); scale_taskset_by_factor( mult); assigninitialcorunnerdependencies( r); rearrangecorunnerdependencies(); recomputepd(r); writetasksettofile("taskset.txt"); mychdir(".."); mychdir(".."); mychdir(".."); } }
void do_run_experiments() {
int iterator; char tempstr[20000]; for (iterator=1;iterator<=MAX_ITERATOR_IN_BATCH;iterator++) { mychdir("experiments"); mychdir("experiment1"); sprintf(tempstr,"taskset%d",iterator); mychdir(tempstr); readtasksetfromfile("taskset.txt"); determineschedulabilityandcomputeresponsetimes(); writeresponsetimestofile(outputfilename); mychdir(".."); mychdir(".."); mychdir(".."); } }
void readsuccessandtime(char* infn, int* psuccess_newmethod, double* ptimetaken_newmethod, int* psuccess_baselinemethod, double* ptimetaken_baselinemethod) { FILE* myfile; myfile = fopen( infn, "r"); fscanf( myfile, "%d %lf %d %lf", psuccess_newmethod, ptimetaken_newmethod, psuccess_baselinemethod, ptimetaken_baselinemethod); fclose( myfile); }
void do_generate_statistics() { char infn[20000]; char outfn[20000]; int success_newmethod; double timetaken_newmethod; int success_baselinemethod; double timetaken_baselinemethod; int iterator; FILE* myfile;
#if COMPILEMSVS
sprintf(outfn,"experiments\\experiment1\\statistics.txt");
#else
sprintf(outfn,"experiments/experiment1/statistics.txt");
#endif
myfile = fopen( outfn, "w"); for (iterator=1;iterator<=MAX_ITERATOR_IN_BATCH;iterator++) {
#if COMPILEMSVS
sprintf(infn,"experiments\\experiment1\\taskset%d\\taskset_results_from_analysis.txt",iterator);
#else
sprintf(infn,"experiments/experiment1/taskset%d/taskset_results_from_analysis.txt",iterator);
#endif
readsuccessandtime(infn, &success_newmethod, &timetaken_newmethod, &success_baselinemethod, &timetaken_baselinemethod); fprintf( myfile, "%d %lf %d %lf\n", success_newmethod, timetaken_newmethod, success_baselinemethod, timetaken_baselinemethod); } fclose( myfile); }
void initialize_random_generator() {
#if COMPILEMSVS
srand(137);
#else
srand(137);
#endif
}
// this function is not used. But we keep it in case we need it in the future.
void OLD_addtooverallstatistics( FILE* myfile, char* fninput) { FILE* fin; char readstring[20000]; int nlinesread; double successnewmethod; double mintnewmethod; double maxtnewmethod; double successbaselinemethod; double mintbaselinemethod; double maxtbaselinemethod; int t1; double t2; int t3; double t4; nlinesread = 0; successnewmethod = 0; mintnewmethod = -1; maxtnewmethod = -1; successbaselinemethod = 0; mintbaselinemethod = -1; maxtbaselinemethod = -1; fin = fopen( fninput, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %lf %d %lf", &t1, &t2, &t3, &t4); nlinesread = nlinesread + 1; successnewmethod = successnewmethod + t1; if (mintnewmethod==-1) { mintnewmethod = t2; } else { if (t2<mintnewmethod) { mintnewmethod = t2; } } if (maxtnewmethod==-1) { maxtnewmethod = t2; } else { if (t2>maxtnewmethod) { maxtnewmethod = t2; } } successbaselinemethod = successbaselinemethod + t3; if (mintbaselinemethod==-1) { mintbaselinemethod = t4; } else { if (t4<mintbaselinemethod) { mintbaselinemethod = t4; } } if (maxtbaselinemethod==-1) { maxtbaselinemethod = t4; } else { if (t4>maxtbaselinemethod) { maxtbaselinemethod = t4; } } } fclose( fin); successnewmethod = successnewmethod / nlinesread; successbaselinemethod = successbaselinemethod / nlinesread; fprintf( myfile, "%d %d %d %lf %lf %lf %lf %lf %lf\n", nprocessors, ntasks, nsg, successnewmethod, mintnewmethod, maxtnewmethod, successbaselinemethod, mintbaselinemethod, maxtbaselinemethod ); }
void addtooverallstatistics( FILE* myfile, char* fninput) { FILE* fin; char readstring[20000]; int nlinesread; double successnewmethod; double mintnewmethod; double maxtnewmethod; double successbaselinemethod; double mintbaselinemethod; double maxtbaselinemethod; int t1; double t2; int t3; double t4; nlinesread = 0; successnewmethod = 0; mintnewmethod = -1; maxtnewmethod = -1; successbaselinemethod = 0; mintbaselinemethod = -1; maxtbaselinemethod = -1; fin = fopen( fninput, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %lf %d %lf", &t1, &t2, &t3, &t4); nlinesread = nlinesread + 1; successnewmethod = successnewmethod + t1; if (mintnewmethod==-1) { mintnewmethod = t2; } else { if (t2<mintnewmethod) { mintnewmethod = t2; } } if (maxtnewmethod==-1) { maxtnewmethod = t2; } else { if (t2>maxtnewmethod) { maxtnewmethod = t2; } } successbaselinemethod = successbaselinemethod + t3; if (mintbaselinemethod==-1) { mintbaselinemethod = t4; } else { if (t4<mintbaselinemethod) { mintbaselinemethod = t4; } } if (maxtbaselinemethod==-1) { maxtbaselinemethod = t4; } else { if (t4>maxtbaselinemethod) { maxtbaselinemethod = t4; } } } fclose( fin); successnewmethod = successnewmethod / nlinesread; successbaselinemethod = successbaselinemethod / nlinesread; fprintf( myfile, "%d %d %d %lf %lf %lf %lf\n", nprocessors, ntasks, nsg, successnewmethod, maxtnewmethod, successbaselinemethod, maxtbaselinemethod ); }
void createdirectorynameforthismetacase(char* metadirectoryforthiscase, int nprocessors, int ntasks, int nsg) { sprintf( metadirectoryforthiscase, "m%dn%ds%d", nprocessors, ntasks, nsg); }
double roundup_to_ms(double t) { return ((ceil(1000.0*t))/1000.0); }
// This function is currently not used. But we may want to use it in the future.
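// Note on the data flow around the LaTeX-table generators below (old_generate_latex_from_overallstatistics, the unused
// variant mentioned just above, and generate_latex_from_overallstatistics): addtooverallstatistics() appends one row per
// (nprocessors, ntasks, nsg) case to overallstatistics.txt in the order
//   nprocessors ntasks nsg succ_newmethod maxt_newmethod succ_baselinemethod maxt_baselinemethod
// and the generators re-read such rows with the matching "%d %d %d %lf %lf %lf %lf" format.
// Purely illustrative example (made-up numbers): a row "4 10 2 0.85 1.234 0.60 0.456" would become a table line with
// 85% success for the new method and 60% for the baseline, with the max analysis times rounded up to the next 1/1000
// by roundup_to_ms before printing.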
void old_generate_latex_from_overallstatistics(char* sourcefn, char* destfn) { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fin = fopen( sourcefn, "r"); fout = fopen( destfn, "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{3}{c|}{New method} & \\multicolumn{3}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & succ & mint & maxt & succ & mint & maxt \\\\ \\hline \\hline \n"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &mint_newmethod, &maxt_newmethod, &succ_baselinemethod, &mint_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %d & %d & %d & %.2lf & %6.3lf & %6.3lf & %.2lf & %6.3lf & %6.3lf \\\\ \\hline \n",temp1, temp2, temp3, succ_newmethod, roundup_to_ms(mint_newmethod), roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(mint_baselinemethod), roundup_to_ms(maxt_baselinemethod) ); } fprintf(fout,"\\end{tabular} \n"); fclose( fout); fclose( fin); } void generate_latex_from_overallstatistics(char* sourcefn, char* destfn) { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fin = fopen( sourcefn, "r"); fout = fopen( destfn, "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{2}{c|}{New method} & \\multicolumn{2}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & succ & maxt & succ & maxt \\\\ \\hline \\hline \n"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n",temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fprintf(fout,"\\end{tabular} \n"); fclose( fout); fclose( fin); } // This function is currently not used. But we my want to use it in the future. 
void old_do_metathing(int metaindex) { char metadirectoryforthiscase[20000]; FILE* myfile; myfile = fopen( "overallstatistics.txt", "w"); for (nprocessors=2;nprocessors<=8;nprocessors++) { ntasks = nprocessors+1; for (nsg=1;nsg<=2;nsg++) { createdirectorynameforthismetacase(metadirectoryforthiscase, nprocessors, ntasks, nsg); mymakedirectory(metadirectoryforthiscase); mychdir(metadirectoryforthiscase); do_generate_experiments(metaindex); do_run_experiments(); do_generate_statistics(); #if COMPILEMSVS addtooverallstatistics( myfile, "experiments\\experiment1\\statistics.txt"); #else addtooverallstatistics( myfile, "experiments/experiment1/statistics.txt"); #endif mychdir(".."); } ntasks = 10; for (nsg=1;nsg<=2;nsg++) { createdirectorynameforthismetacase(metadirectoryforthiscase, nprocessors, ntasks, nsg); mymakedirectory(metadirectoryforthiscase); mychdir(metadirectoryforthiscase); do_generate_experiments(metaindex); do_run_experiments(); do_generate_statistics(); #if COMPILEMSVS addtooverallstatistics( myfile, "experiments\\experiment1\\statistics.txt"); #else addtooverallstatistics( myfile, "experiments/experiment1/statistics.txt"); #endif mychdir(".."); } } fclose( myfile); generate_latex_from_overallstatistics("overallstatistics.txt","overallstatistics.tex"); } void do_metathing_generate_experiments(int metaindex) { char metadirectoryforthiscase[20000]; for (nprocessors=2;nprocessors<=8;nprocessors++) { for (ntasks=2;ntasks<=16;ntasks+=2) { for (nsg=1;nsg<=2;nsg++) { createdirectorynameforthismetacase(metadirectoryforthiscase, nprocessors, ntasks, nsg); mymakedirectory(metadirectoryforthiscase); mychdir(metadirectoryforthiscase); do_generate_experiments(metaindex); mychdir(".."); } } } } void do_metathing_run_experiments(int metaindex) { char metadirectoryforthiscase[20000]; FILE* myfile; myfile = fopen( "overallstatistics.txt", "w"); for (nprocessors=2;nprocessors<=8;nprocessors++) { for (ntasks=2;ntasks<=16;ntasks+=2) { for (nsg=1;nsg<=2;nsg++) { createdirectorynameforthismetacase(metadirectoryforthiscase, nprocessors, ntasks, nsg); mychdir(metadirectoryforthiscase); do_run_experiments(); do_generate_statistics(); #if COMPILEMSVS addtooverallstatistics( myfile, "experiments\\experiment1\\statistics.txt"); #else addtooverallstatistics( myfile, "experiments/experiment1/statistics.txt"); #endif mychdir(".."); } } } fclose( myfile); generate_latex_from_overallstatistics("overallstatistics.txt","overallstatistics.tex"); } void do_metathing(int metaindex) { do_metathing_generate_experiments(metaindex); do_metathing_run_experiments(metaindex); } // This function is currently not used. But we my want to use it in the future. 
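// Sketch of the directory layout used by the batch/meta machinery above and by the table builders below (summarized
// from the code; the meta<k> directories are assumed to be created by the caller, one per (mult, PROGMIN) setting):
//   m<P>n<T>s<S>/experiments/experiment1/taskset<i>/taskset.txt                        generated task set
//   m<P>n<T>s<S>/experiments/experiment1/taskset<i>/taskset_results_from_analysis.txt  per-taskset analysis output
//   m<P>n<T>s<S>/experiments/experiment1/statistics.txt                                one row per taskset
//   overallstatistics.txt / overallstatistics.tex                                      one row per m<P>n<T>s<S> case
//   meta<k>/overallstatistics.txt                                                      read by the table builders below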
void old_createone_of_the_three_tables(char* sourcefn1, char* sourcefn2, char* destfn) { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fout = fopen( destfn, "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$\\mathrm{PROGMIN}$ & $|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{3}{c|}{New method} & \\multicolumn{3}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & & succ & mint & maxt & succ & mint & maxt \\\\ \\hline \\hline \n"); fin = fopen( sourcefn1, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &mint_newmethod, &maxt_newmethod, &succ_baselinemethod, &mint_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %6.3lf & %.2lf & %6.3lf & %6.3lf \\\\ \\hline \n",1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(mint_newmethod), roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(mint_baselinemethod), roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fin = fopen( sourcefn1, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &mint_newmethod, &maxt_newmethod, &succ_baselinemethod, &mint_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %6.3lf & %.2lf & %6.3lf & %6.3lf \\\\ \\hline \n",1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(mint_newmethod), roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(mint_baselinemethod), roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fprintf(fout,"\\end{tabular} \n"); fclose( fout); } void createone_of_the_three_tables(char* sourcefn1, char* sourcefn2, char* destfn) { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fout = fopen( destfn, "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$\\mathrm{PROGMIN}$ & $|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{2}{c|}{New method} & \\multicolumn{2}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & & succ & maxt & succ & maxt \\\\ \\hline \\hline \n"); fin = fopen( sourcefn1, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n",1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fin = fopen( sourcefn2, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n",1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); 
fprintf(fout,"\\end{tabular} \n"); fclose( fout); } void createthreetables() { #if COMPILEMSVS createone_of_the_three_tables("meta1\\overallstatistics.txt","meta2\\overallstatistics.txt", "table1.tex"); #else createone_of_the_three_tables("meta1/overallstatistics.txt","meta2/overallstatistics.txt", "table1.tex"); #endif #if COMPILEMSVS createone_of_the_three_tables("meta3\\overallstatistics.txt","meta4\\overallstatistics.txt", "table2.tex"); #else createone_of_the_three_tables("meta3/overallstatistics.txt","meta4/overallstatistics.txt", "table2.tex"); #endif #if COMPILEMSVS createone_of_the_three_tables("meta5\\overallstatistics.txt","meta6\\overallstatistics.txt", "table3.tex"); #else createone_of_the_three_tables("meta5/overallstatistics.txt","meta6/overallstatistics.txt", "table3.tex"); #endif } void createfourtables() { #if COMPILEMSVS createone_of_the_three_tables("meta1\\overallstatistics.txt","meta2\\overallstatistics.txt", "table1.tex"); #else createone_of_the_three_tables("meta1/overallstatistics.txt","meta2/overallstatistics.txt", "table1.tex"); #endif #if COMPILEMSVS createone_of_the_three_tables("meta3\\overallstatistics.txt","meta4\\overallstatistics.txt", "table2.tex"); #else createone_of_the_three_tables("meta3/overallstatistics.txt","meta4/overallstatistics.txt", "table2.tex"); #endif #if COMPILEMSVS createone_of_the_three_tables("meta5\\overallstatistics.txt","meta6\\overallstatistics.txt", "table3.tex"); #else createone_of_the_three_tables("meta5/overallstatistics.txt","meta6/overallstatistics.txt", "table3.tex"); #endif #if COMPILEMSVS createone_of_the_three_tables("meta7\\overallstatistics.txt","meta8\\overallstatistics.txt", "table4.tex"); #else createone_of_the_three_tables("meta7/overallstatistics.txt","meta8/overallstatistics.txt", "table4.tex"); #endif } void createsingletable() { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fout = fopen( "single_table.tex", "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$\\mathrm{mult}$ & $\\mathrm{PROGMIN}$ & $|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{2}{c|}{New method} & \\multicolumn{2}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & & & succ & maxt & succ & maxt \\\\ \\hline \\hline \n"); #if COMPILEMSVS fin = fopen( "meta1\\overallstatistics.txt", "r"); #else fin = fopen( "meta1/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.10, 1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta2\\overallstatistics.txt", "r"); #else fin = fopen( "meta2/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.10, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), 
succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta5\\overallstatistics.txt", "r"); #else fin = fopen( "meta5/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.25, 1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta6\\overallstatistics.txt", "r"); #else fin = fopen( "meta6/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.25, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fprintf(fout,"\\end{tabular} \n"); fclose( fout); } void createtwotables() { FILE* fin; FILE* fout; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; fout = fopen( "single_table_first.tex", "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$\\mathrm{mult}$ & $\\mathrm{PROGMIN}$ & $|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{2}{c|}{New method} & \\multicolumn{2}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & & & succ & maxt & succ & maxt \\\\ \\hline \\hline \n"); #if COMPILEMSVS fin = fopen( "meta1\\overallstatistics.txt", "r"); #else fin = fopen( "meta1/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.10, 1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta2\\overallstatistics.txt", "r"); #else fin = fopen( "meta2/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.10, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta3\\overallstatistics.txt", "r"); #else fin = fopen( "meta3/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.40, 1/11.0, 
temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta4\\overallstatistics.txt", "r"); #else fin = fopen( "meta4/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.40, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fprintf(fout,"\\end{tabular} \n"); fclose( fout); fout = fopen( "single_table_second.tex", "w"); fprintf(fout, "\\begin{tabular}{|r|r|r|r|r|r|r|r|r|} \\hline \\hline \n"); fprintf(fout, "$\\mathrm{mult}$ & $\\mathrm{PROGMIN}$ & $|\\mathrm{procs}(\\Pi)|$ & $|\\tau|$ & nsg & \\multicolumn{2}{c|}{New method} & \\multicolumn{2}{c|}{Baseline method} \\\\ \n"); fprintf(fout, " & & & & & succ & maxt & succ & maxt \\\\ \\hline \\hline \n"); #if COMPILEMSVS fin = fopen( "meta5\\overallstatistics.txt", "r"); #else fin = fopen( "meta5/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.70, 1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta6\\overallstatistics.txt", "r"); #else fin = fopen( "meta6/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 0.70, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta7\\overallstatistics.txt", "r"); #else fin = fopen( "meta7/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 1.00, 1/11.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); #if COMPILEMSVS fin = fopen( "meta8\\overallstatistics.txt", "r"); #else fin = fopen( "meta8/overallstatistics.txt", "r"); #endif while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); fprintf(fout, " %4.2lf & %4.2lf & %d & %d & %d & %.2lf & %6.3lf & %.2lf & %6.3lf \\\\ \\hline \n", 1.00, 1/2.0, temp1, temp2, temp3, succ_newmethod, roundup_to_ms(maxt_newmethod), succ_baselinemethod, roundup_to_ms(maxt_baselinemethod) ); } fclose( fin); fprintf(fout,"\\end{tabular} \n"); fclose( fout); } 
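// The functions below (getisolatedCfrompart, getmeasurements, getminexecalone, setcorunnerspecifications,
// generatetasksetforvalidation) turn measured execution times of program parts into a task set with
// co-runner-dependent progress factors. Sketch of the expected measurement input (the numbers here are made up,
// for illustration only): after one header line of text, each row reads
//   tid cor1 cor2 cor3 max_exec min_exec avg_exec
// where tid is the measured part and cor1..cor3 are the parts running on the other processors (0 = no co-runner).
// A row with cor1=cor2=cor3=0 gives the part's execution time in isolation; its max_exec is used as the segment's C.
// For each specified co-runner set, the progress factor is computed as
//   (min execution time of the part alone) / (max execution time of the part with that co-runner set),
// so if part 2 alone has min_exec 1.50 and with part 4 as sole co-runner has max_exec 2.00, the factor would be 0.75.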
double getisolatedCfrompart(int partid) { int index; for (index=1;index<=nmeasurements;index++) { if (measurements[index].tid==partid) { if ((measurements[index].cor1==0) && (measurements[index].cor2==0) && (measurements[index].cor3==0)) { return measurements[index].max_exec; } } } } // In the article, obtained co-runner interference data through experiments and we saved them in a file measurements.txt // This data is shown at the bottom of this source code file. // This file is taken as input to the tool. void getmeasurements() { FILE* fin; char readstring[20000]; int tid; int cor1; int cor2; int cor3; double max_exec; double min_exec; double avg_exec; fin = fopen( producetasksetsfrompartsfilename, "r"); nmeasurements = 0; fgets( readstring, 20000, fin); // we read the first line which show headers as text while (fgets( readstring, 20000, fin)!=NULL) { nmeasurements = nmeasurements + 1; if (sscanf( readstring, "%d %d %d %d %lf %lf %lf", &tid, &cor1, &cor2, &cor3, &max_exec, &min_exec, &avg_exec)!=7) { printf("Error when reading input file. Measurements\n"); exit(-1); } measurements[ nmeasurements].tid = tid; measurements[ nmeasurements].cor1 = cor1; measurements[ nmeasurements].cor2 = cor2; measurements[ nmeasurements].cor3 = cor3; measurements[ nmeasurements].max_exec = max_exec; measurements[ nmeasurements].min_exec = min_exec; measurements[ nmeasurements].avg_exec = avg_exec; } fclose( fin); } double getminexecalone( int i, int k) { int index; int tid; int cor1; int cor2; int cor3; double max_exec; double min_exec; double avg_exec; for (index=1;index<=nmeasurements;index++) { tid = measurements[ index].tid; cor1 = measurements[ index].cor1; cor2 = measurements[ index].cor2; cor3 = measurements[ index].cor3; max_exec = measurements[ index].max_exec; min_exec = measurements[ index].min_exec; avg_exec = measurements[ index].avg_exec; if ((tid!=0) && (cor1==0) && (cor2==0) && (cor3==0)) { if (tasks[i].segments[k].partid==tid) { return min_exec; } } } printf("Error in getminexecalone\n"); exit(-1); return -1; } int aresegmentsondifferentprocessors(int i,int k,int iprime,int kprime) { if (tasks[i].proc != tasks[iprime].proc) { return 1; } else { return 0; } } void setcorunnerspecifications() { int index; int tid; int cor1; int cor2; int cor3; double max_exec; double min_exec; double avg_exec; int i; int k; int iprime; int kprime; int iprimeprime; int kprimeprime; int iprimeprimeprime; int kprimeprimeprime; for (index=1;index<=nmeasurements;index++) { tid = measurements[ index].tid; cor1 = measurements[ index].cor1; cor2 = measurements[ index].cor2; cor3 = measurements[ index].cor3; max_exec = measurements[ index].max_exec; min_exec = measurements[ index].min_exec; avg_exec = measurements[ index].avg_exec; if ((tid!=0) && (cor1!=0) && (cor2==0) && (cor3==0)) { for (i=1;i<=ntasks;i++) { for (k=1;k<=tasks[i].nsegments;k++) { for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { if ( (tasks[i].segments[k].partid==tid) && (tasks[iprime].segments[kprime].partid==cor1) ) { if (aresegmentsondifferentprocessors(i,k,iprime,kprime)) { tasks[i].segments[k].ncorunnersetsspecified = tasks[i].segments[k].ncorunnersetsspecified + 1; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].nsegmentsincorunnerset = 1; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[1].taskindex = iprime; tasks[i].segments[k].corunnersetsspecified[ 
tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[1].segmentindex = kprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].progresscorunnerset = getminexecalone( i, k) / max_exec; } } } } } } } if ((tid!=0) && (cor1!=0) && (cor2!=0) && (cor3==0)) { for (i=1;i<=ntasks;i++) { for (k=1;k<=tasks[i].nsegments;k++) { for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { for (iprimeprime=1;iprimeprime<=ntasks;iprimeprime++) { for (kprimeprime=1;kprimeprime<=tasks[iprimeprime].nsegments;kprimeprime++) { if ( (tasks[i].segments[k].partid==tid) && (tasks[iprime].segments[kprime].partid==cor1) && (tasks[iprimeprime].segments[kprimeprime].partid==cor2) ) { if ((aresegmentsondifferentprocessors(i,k,iprime,kprime)) && (aresegmentsondifferentprocessors(i,k,iprimeprime,kprimeprime)) && (aresegmentsondifferentprocessors(iprime,kprime,iprimeprime,kprimeprime))) { tasks[i].segments[k].ncorunnersetsspecified = tasks[i].segments[k].ncorunnersetsspecified + 1; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].nsegmentsincorunnerset = 2; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[1].taskindex = iprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[1].segmentindex = kprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[2].taskindex = iprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[2].segmentindex = kprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].progresscorunnerset = getminexecalone( i, k) / max_exec; } } } } } } } } } if ((tid!=0) && (cor1!=0) && (cor2!=0) && (cor3!=0)) { for (i=1;i<=ntasks;i++) { for (k=1;k<=tasks[i].nsegments;k++) { for (iprime=1;iprime<=ntasks;iprime++) { for (kprime=1;kprime<=tasks[iprime].nsegments;kprime++) { for (iprimeprime=1;iprimeprime<=ntasks;iprimeprime++) { for (kprimeprime=1;kprimeprime<=tasks[iprimeprime].nsegments;kprimeprime++) { for (iprimeprimeprime=1;iprimeprimeprime<=ntasks;iprimeprimeprime++) { for (kprimeprimeprime=1;kprimeprimeprime<=tasks[iprimeprimeprime].nsegments;kprimeprimeprime++) { if ((tasks[i].segments[k].partid==tid) && (tasks[iprime].segments[kprime].partid==cor1) && (tasks[iprimeprime].segments[kprimeprime].partid==cor2) && (tasks[iprimeprimeprime].segments[kprimeprimeprime].partid==cor3)) { if ((aresegmentsondifferentprocessors(i,k,iprime,kprime)) && (aresegmentsondifferentprocessors(i,k,iprimeprime,kprimeprime)) && (aresegmentsondifferentprocessors(i,k,iprimeprimeprime,kprimeprimeprime)) && (aresegmentsondifferentprocessors(iprime,kprime,iprimeprime,kprimeprime)) && (aresegmentsondifferentprocessors(iprime,kprime,iprimeprimeprime,kprimeprimeprime)) && (aresegmentsondifferentprocessors(iprimeprime,kprimeprime,iprimeprimeprime,kprimeprimeprime))) { tasks[i].segments[k].ncorunnersetsspecified = tasks[i].segments[k].ncorunnersetsspecified + 1; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].nsegmentsincorunnerset = 3; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[1].taskindex = iprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified 
].segmentsincorunnerset[1].segmentindex = kprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[2].taskindex = iprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[2].segmentindex = kprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[3].taskindex = iprimeprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].segmentsincorunnerset[3].segmentindex = kprimeprimeprime; tasks[i].segments[k].corunnersetsspecified[ tasks[i].segments[k].ncorunnersetsspecified ].progresscorunnerset = getminexecalone( i, k) / max_exec; } } } } } } } } } } } } } void printmeasurements() { int index; for (index=1;index<=nmeasurements;index++) { printf("%d %d %d %d %lf %lf %lf", measurements[ index].tid, measurements[ index].cor1, measurements[ index].cor2, measurements[ index].cor3, measurements[ index].max_exec, measurements[ index].min_exec, measurements[ index].avg_exec); } } void generatetasksetforvalidation() { getmeasurements(); // task 1 is assigned to processor 1 and it consists of part 1 // task 2 is assigned to processor 1 and it consists of part 2 and 3 // task 3 is assigned to processor 2 and it consists of part 4 // task 4 is assigned to processor 2 and it consists of part 5 // task 5 is assigned to processor 3 and it consists of part 6 // task 6 is assigned to processor 4 and it consists of part 2 and 6 useanalysismethod = 1; nprocessors = 4; ntasks = 6; tasks[1].id = 1; tasks[1].priority = 6; tasks[1].proc = 1; tasks[1].T = 200.0; tasks[1].D = 200.0; tasks[1].nsegments = 1; tasks[1].segments[1].segid = 1; tasks[1].segments[1].C = getisolatedCfrompart(1); tasks[1].segments[1].pd = 0.05; tasks[1].segments[1].ncorunnersetsspecified = 0; tasks[1].segments[1].partid = 1; tasks[2].id = 2; tasks[2].priority = 5; tasks[2].proc = 1; tasks[2].T = 600.0; tasks[2].D = 600.0; tasks[2].nsegments = 2; tasks[2].segments[1].segid = 1; tasks[2].segments[1].C = getisolatedCfrompart(2);; tasks[2].segments[1].pd = 0.05; tasks[2].segments[1].ncorunnersetsspecified = 0; tasks[2].segments[1].partid = 2; tasks[2].segments[2].segid = 2; tasks[2].segments[2].C = getisolatedCfrompart(3); tasks[2].segments[2].pd = 0.05; tasks[2].segments[2].ncorunnersetsspecified = 0; tasks[2].segments[2].partid = 3; tasks[3].id = 3; tasks[3].priority = 4; tasks[3].proc = 2; tasks[3].T = 200.0; tasks[3].D = 200.0; tasks[3].nsegments = 1; tasks[3].segments[1].segid = 1; tasks[3].segments[1].C = getisolatedCfrompart(4); tasks[3].segments[1].pd = 0.05; tasks[3].segments[1].ncorunnersetsspecified = 0; tasks[3].segments[1].partid = 4; tasks[4].id = 4; tasks[4].priority = 3; tasks[4].proc = 3; tasks[4].T = 200.0; tasks[4].D = 200.0; tasks[4].nsegments = 1; tasks[4].segments[1].segid = 1; tasks[4].segments[1].C = getisolatedCfrompart(5); tasks[4].segments[1].pd = 0.05; tasks[4].segments[1].ncorunnersetsspecified = 0; tasks[4].segments[1].partid = 5; tasks[5].id = 5; tasks[5].priority = 2; tasks[5].proc = 4; tasks[5].T = 10000.0; tasks[5].D = 150.0; tasks[5].nsegments = 1; tasks[5].segments[1].segid = 1; tasks[5].segments[1].C = getisolatedCfrompart(6); tasks[5].segments[1].pd = 0.05; tasks[5].segments[1].ncorunnersetsspecified = 0; tasks[5].segments[1].partid = 6; tasks[6].id = 6; tasks[6].priority = 1; tasks[6].proc = 4; tasks[6].T = 500.0; tasks[6].D = 500.0; tasks[6].nsegments = 2; 
tasks[6].segments[1].segid = 1; tasks[6].segments[1].C = getisolatedCfrompart(2); tasks[6].segments[1].pd = 0.05; tasks[6].segments[1].ncorunnersetsspecified = 0; tasks[6].segments[1].partid = 2; tasks[6].segments[2].segid = 2; tasks[6].segments[2].C = getisolatedCfrompart(6); tasks[6].segments[2].pd = 0.05; tasks[6].segments[2].ncorunnersetsspecified = 0; tasks[6].segments[2].partid = 6; setcorunnerspecifications(); writetasksettofile("taskset_generated_from_synthetic_programs.txt"); exit(0); } double get_succ_newmethod_fromfile(char* fn, int search_for_nprocessors) { FILE* fin; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; int counter; double aggregate_so_far; counter = 0; aggregate_so_far = 0.0; fin = fopen( fn, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); if (temp1==search_for_nprocessors) { counter++; aggregate_so_far = aggregate_so_far + succ_newmethod; } } fclose( fin); if (counter>=1) { return aggregate_so_far/counter; } else { printf("Error in get_succ_newmethod_fromfile\n"); fflush(stdout); exit(-1); } } double get_maxtime_newmethod_fromfile(char* fn, int search_for_nprocessors) { FILE* fin; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; int counter; double aggregate_so_far; counter = 0; aggregate_so_far = 0.0; fin = fopen( fn, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); if (temp1==search_for_nprocessors) { counter++; aggregate_so_far = maxdouble2( aggregate_so_far, maxt_newmethod); } } fclose( fin); if (counter>=1) { return aggregate_so_far; } else { printf("Error in get_maxtime_newmethod_fromfile\n"); fflush(stdout); exit(-1); } } double get_succ_baselinemethod_fromfile(char* fn, int search_for_nprocessors) { FILE* fin; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; int counter; double aggregate_so_far; counter = 0; aggregate_so_far = 0.0; fin = fopen( fn, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, "%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); if (temp1==search_for_nprocessors) { counter++; aggregate_so_far = aggregate_so_far + succ_baselinemethod; } } fclose( fin); if (counter>=1) { return aggregate_so_far/counter; } else { printf("Error in get_succ_baselinemethod_fromfile\n"); fflush(stdout); exit(-1); } } double get_maxtime_baselinemethod_fromfile(char* fn, int search_for_nprocessors) { FILE* fin; char readstring[20000]; int temp1; int temp2; int temp3; double succ_newmethod; double mint_newmethod; double maxt_newmethod; double succ_baselinemethod; double mint_baselinemethod; double maxt_baselinemethod; int counter; double aggregate_so_far; counter = 0; aggregate_so_far = 0.0; fin = fopen( fn, "r"); while (fgets( readstring, 20000, fin)!=NULL) { sscanf( readstring, 
"%d %d %d %lf %lf %lf %lf", &temp1, &temp2, &temp3, &succ_newmethod, &maxt_newmethod, &succ_baselinemethod, &maxt_baselinemethod ); if (temp1==search_for_nprocessors) { counter++; aggregate_so_far = maxdouble2( aggregate_so_far, maxt_baselinemethod); } } fclose( fin); if (counter>=1) { return aggregate_so_far; } else { printf("Error in get_maxtime_baselinemethod_fromfile\n"); fflush(stdout); exit(-1); } } void compute_aggregate_statistics(int filter, int selectedmetaindex) { FILE* fout; double aggregate_so_far; int counter; int metaindex; char metadir[20000]; if (filter) { fout = fopen( "succ_newmethod_filtered.txt", "w"); } else { fout = fopen( "succ_newmethod.txt", "w"); } for (nprocessors=2;nprocessors<=8;nprocessors++) { counter = 0; aggregate_so_far = 0.0; for (metaindex=1;metaindex<=400;metaindex++) { sprintf( metadir, "meta%d", metaindex); mymakedirectory(metadir); mychdir(metadir); if ((!filter) || (selectedmetaindex==metaindex)) { aggregate_so_far = aggregate_so_far + get_succ_newmethod_fromfile("overallstatistics.txt", nprocessors); counter++; } mychdir(".."); } fprintf(fout, "%d %lf\n", nprocessors, aggregate_so_far/counter); } fclose( fout); if (filter) { fout = fopen( "maxtime_required_newmethod_filtered.txt", "w"); } else { fout = fopen( "maxtime_required_newmethod.txt", "w"); } for (nprocessors=2;nprocessors<=8;nprocessors++) { counter = 0; aggregate_so_far = 0.0; for (metaindex=1;metaindex<=400;metaindex++) { sprintf( metadir, "meta%d", metaindex); mymakedirectory(metadir); mychdir(metadir); if ((!filter) || (selectedmetaindex==metaindex)) { aggregate_so_far = maxdouble2( aggregate_so_far, get_maxtime_newmethod_fromfile("overallstatistics.txt", nprocessors) ); counter++; } mychdir(".."); } fprintf(fout, "%d %lf\n", nprocessors, aggregate_so_far); } fclose( fout); if (filter) { fout = fopen( "succ_baselinemethod_filtered.txt", "w"); } else { fout = fopen( "succ_baselinemethod.txt", "w"); } for (nprocessors=2;nprocessors<=8;nprocessors++) { counter = 0; aggregate_so_far = 0.0; for (metaindex=1;metaindex<=400;metaindex++) { sprintf( metadir, "meta%d", metaindex); mymakedirectory(metadir); mychdir(metadir); if ((!filter) || (selectedmetaindex==metaindex)) { aggregate_so_far = aggregate_so_far + get_succ_baselinemethod_fromfile("overallstatistics.txt", nprocessors); counter++; } mychdir(".."); } fprintf(fout, "%d %lf\n", nprocessors, aggregate_so_far/counter); } fclose( fout); if (filter) { fout = fopen( "maxtime_required_baselinemethod_filtered.txt", "w"); } else { fout = fopen( "maxtime_required_baselinemethod.txt", "w"); } for (nprocessors=2;nprocessors<=8;nprocessors++) { counter = 0; aggregate_so_far = 0.0; for (metaindex=1;metaindex<=400;metaindex++) { sprintf( metadir, "meta%d", metaindex); mymakedirectory(metadir); mychdir(metadir); if ((!filter) || (selectedmetaindex==metaindex)) { aggregate_so_far = maxdouble2( aggregate_so_far, get_maxtime_baselinemethod_fromfile("overallstatistics.txt", nprocessors) ); counter++; } mychdir(".."); } fprintf(fout, "%d %lf\n", nprocessors, aggregate_so_far); } fclose( fout); } void do_part_for_create_text_for_appendix(FILE* fout, int metaindex, double mul, double PROGMIN) { char fnwithdir[20000]; char readstring[20000]; FILE* fin; int counter; int startdataindex; int finishdataindex; int middlepoint; fprintf(fout, "\\begin{table*}[t]\n"); #if COMPILEMSVS sprintf(fnwithdir,"meta%d\\overallstatistics.tex", metaindex); #else sprintf(fnwithdir,"meta%d/overallstatistics.tex", metaindex); #endif fprintf(fout, "\\scalebox{.7}{\n"); 
startdataindex = 0; finishdataindex = 0; counter = 0; fin = fopen( fnwithdir, "r"); while (fgets( readstring, 20000, fin)!=NULL) { if (strncmp(readstring," &",10)==0) { startdataindex = counter+1; } if (strncmp(readstring,"\\end",4)==0) { finishdataindex = counter-1; } counter++; } fclose( fin); middlepoint = (startdataindex + finishdataindex)/2; counter = 0; fin = fopen( fnwithdir, "r"); while (fgets( readstring, 20000, fin)!=NULL) { if ((middlepoint=1;nsg--) { // we do it in decreasing order in order to get early termination in case of large time for (index1=1;index1<=20;index1++) { for (index2=1;index2<=20;index2++) { if (index1>index2) { for (iterator=1;iterator<=MAX_ITERATOR_IN_BATCH;iterator++) { useanalysismethod = 1; mult = index1*0.05; PROGMIN = index2*0.05; assignTDCoftasks(); sorttasksinDMorderandassignpriorities(); assigntaskstoprocessors(); scale_taskset_so_that_it_is_critically_schedulable_without_co_runner_interference(); scale_taskset_by_factor( mult); assigninitialcorunnerdependencies( PROGMIN); rearrangecorunnerdependencies(); recomputepd(PROGMIN); sprintf( localfn, "taskset_%d_nsg%d_index1_%d_index2_%d.txt", local_counter, nsg, index1, index2); sprintf( outputfilename, "taskset_%d_nsg%d_index1_%d_index2_%d_results_from_analysis.txt", local_counter, nsg, index1, index2); // writetasksettofile(localfn); // we do not need to write and then read the taskset to/from file. // readtasksetfromfile(localfn); determineschedulabilityandcomputeresponsetimes(); writeresponsetimestofile(outputfilename); max_time_so_far = maxdouble2(timerequiredforschedulabilityanalysis_newmethod,max_time_so_far); if (timerequiredforschedulabilityanalysis_newmethod>maxtime_threshold) { return 0; } local_counter++; } } } } } writedoubletofile("maxtimefound.txt", max_time_so_far); return 1; } #define XDIM 11 #define YDIM 7 int compute_index_from_x_and_y(int x, int y) { return y*XDIM+x; } int power2(int n) { if (n==0) { return 1; } else { return 2*power2(n-1); } } // When running this function, there may be failures because of insufficient memory. We treat that as if the running time exceeds the threshold. And then run again with new cells in the table. void create_plots_that_shows_scalability_as_table() { int resultarray[XDIM*YDIM]; int index; int x; int y; int flag; FILE* fout; char tempdirname[20000]; for (index=0;index