schedulers 1.0.0
/*
 Copyright 2015 Nicolas Melot

 This file is part of Crown.

 Crown is free software: you can redistribute it and/or modify
 it under the terms of the GNU General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 Crown is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU General Public License
 along with Crown. If not, see <http://www.gnu.org/licenses/>.
*/

#include <fstream>
#include <functional>
#include <iostream>
#include <iterator>
#include <map>
#include <set>
#include <string>
#include <utility>

#include <pelib/Task.hpp>
#include <pelib/XMLSchedule.hpp>

#include <pelib/scheduler.h>

#ifdef debug
#undef debug
#endif

// Set to 0 to silence the debug macro
#if 1
#define debug(var) cout << "[" << __FILE__ << ":" << __FUNCTION__ << ":" << __LINE__ << "] " << #var << " = \"" << var << "\"" << endl;
#else
#define debug(var)
#endif

using namespace std;
using namespace pelib;

// Order tasks with streaming tasks first, then by decreasing workload; names break ties
struct task_per_work : binary_function<Task, Task, bool>
{
	bool
	operator()(const Task &t1, const Task &t2) const
	{
		if(t1.isStreaming() == t2.isStreaming())
		{
			if(t1.getWorkload() == t2.getWorkload())
			{
				// Deterministic tie-break on task name
				return t2.getName().compare(t1.getName()) < 0;
			}
			else
			{
				return t2.getWorkload() < t1.getWorkload();
			}
		}
		else
		{
			return t2.isStreaming() < t1.isStreaming();
		}
	}
};

// Order per-core sequences by the completion time of their last task (empty sequences first)
struct core_per_work : binary_function<Schedule::sequence, Schedule::sequence, bool>
{
	bool
	operator()(const Schedule::sequence &s1, const Schedule::sequence &s2) const
	{
		double stop1 = 0;
		if(s1.size() > 0)
		{
			const Task *t1 = s1.rbegin()->second.first;
			stop1 = s1.rbegin()->first + t1->runtime(t1->getWidth(), t1->getFrequency());
		}

		double stop2 = 0;
		if(s2.size() > 0)
		{
			const Task *t2 = s2.rbegin()->second.first;
			stop2 = s2.rbegin()->first + t2->runtime(t2->getWidth(), t2->getFrequency());
		}

		return stop1 < stop2;
	}
};

const pelib::Schedule*
pelib_schedule(const pelib::Taskgraph &taskgraph, const pelib::Platform &pt, size_t argc, char** argv, map<const string, double> &stats)
{
	Taskgraph tg = taskgraph;
	set<Task, task_per_work> sorted_start_tasks, sorted_tasks;
	multiset<Schedule::sequence, core_per_work> table;
	multiset<Schedule::sequence, core_per_work> start_table;
	multiset<Schedule::sequence, core_per_work> final_table;

	// Sort tasks by workload, separating streaming tasks from start (non-streaming) tasks
	for(set<Task>::const_iterator i = tg.getTasks().begin(); i != tg.getTasks().end(); i++)
	{
		if(i->isStreaming())
		{
			sorted_tasks.insert(*i);
		}
		else
		{
			sorted_start_tasks.insert(*i);
		}
	}

	// Initialize one empty sequence per core in both schedules
	for(set<const Core*>::const_iterator i = pt.getCores().begin(); i != pt.getCores().end(); i++)
	{
		table.insert(Schedule::sequence());
		start_table.insert(Schedule::sequence());
	}

	// Map start tasks with LPT: always append to the sequence that finishes earliest
	for(set<Task>::iterator i = sorted_start_tasks.begin(); i != sorted_start_tasks.end(); i++)
	{
		Schedule::sequence seq = *start_table.begin();
		start_table.erase(start_table.begin());

		// Cast away constness; frequency and start time do not affect the set ordering
		Task &t = (Task&)*i;
		if(!t.isStreaming())
		{
			// Run the task at the highest frequency of the first core
			t.setFrequency(*(*pt.getCores().begin())->getFrequencies().rbegin());
			if(seq.size() > 0)
			{
				// Start right after the last task already mapped to this core
				const Task *last = seq.rbegin()->second.first;
				double start_time = last->getStartTime() + tg.getTasks().find(*last)->runtime(last->getWidth(), last->getFrequency());
				t.setStartTime(start_time);
				Schedule::work work(&t, t.getWorkload());
				pair<float, Schedule::work> entry(start_time, work);
				seq.insert(entry);
			}
			else
			{
				seq.insert(pair<float, Schedule::work>(0, Schedule::work(&t, t.getWorkload())));
			}
			start_table.insert(seq);
		}
	}

	// Map streaming tasks with LPT
	// Iterate through tasks in decreasing workload order
	for(set<Task>::iterator i = sorted_tasks.begin(); i != sorted_tasks.end(); i++)
	{
		Schedule::sequence seq = *table.begin();
		table.erase(table.begin());

		Task &t = (Task&)*i;
		if(t.isStreaming())
		{
			t.setFrequency(*(*pt.getCores().begin())->getFrequencies().rbegin());
			if(seq.size() > 0)
			{
				const Task *last = seq.rbegin()->second.first;
				double start_time = last->getStartTime() + tg.getTasks().find(*last)->runtime(last->getWidth(), last->getFrequency());
				t.setStartTime(start_time);
				Schedule::work work(&t, t.getWorkload());
				pair<float, Schedule::work> entry(start_time, work);
				seq.insert(entry);
			}
			else
			{
				seq.insert(pair<float, Schedule::work>(0, Schedule::work(&t, t.getWorkload())));
			}
			table.insert(seq);
		}
	}

	// Merge the start and streaming sequences of each core into one unique sequence
	for(multiset<Schedule::sequence, core_per_work>::const_iterator i = table.begin(); i != table.end(); i++)
	{
		size_t offset = std::distance(table.begin(), i);
		multiset<Schedule::sequence, core_per_work>::iterator ii = start_table.begin();
		Schedule::sequence seq;
		if(start_table.size() > offset)
		{
			std::advance(ii, offset);
			seq = *ii;
			//start_table.erase(ii);
		}

		// If a task was already mapped to this core in the start scheduling phase,
		// then offset the start times by the time at which its last task terminates
		double time = 0;
		if(seq.size() > 0)
		{
			const Task *last = seq.rbegin()->second.first;
			time = last->getStartTime() + tg.getTasks().find(*last)->runtime(last->getWidth(), last->getFrequency());
		}

		// Now add all tasks from the streaming table, shifted by the offset
		for(Schedule::sequence::const_iterator j = i->begin(); j != i->end(); j++)
		{
			pair<float, Schedule::work> w = *j;
			w.first += time;
			Task *t = (Task*)w.second.first;
			t->setStartTime(w.first);
			seq.insert(w);
		}

		final_table.insert(seq);
	}

	// Do frequency scaling with bin packing
	for(multiset<Schedule::sequence, core_per_work>::const_iterator i = table.begin(); i != table.end(); i++)
	{
		// TODO: not implemented yet
	}

	// Assign core numbers from pt.getCores().size() down to 1;
	// the sequence that finishes earliest gets the highest number
	int core = 0;
	Schedule::table sched;
	for(multiset<Schedule::sequence, core_per_work>::const_iterator i = final_table.begin(); i != final_table.end(); i++, core++)
	{
		sched.insert(pair<int, Schedule::sequence>(pt.getCores().size() - core, *i));
	}

	Schedule *schedule = new Schedule("lpt", tg.getName(), sched);

	return schedule;
}

void
pelib_delete(pelib::Schedule *sched)
{
	delete sched;
}
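
The mapping above is the classic LPT (Longest Processing Time first) heuristic: tasks are visited in decreasing workload order and each one is appended to the per-core sequence that currently finishes earliest, which the core_per_work multiset keeps at its begin() position. The sketch below shows the same greedy loop in isolation, without the pelib types; the Job and CoreLoad structs, the job names and the workload values are invented for the illustration and are not part of pelib.

// Minimal, self-contained sketch of the LPT heuristic used above.
// Job, CoreLoad and the sample workloads are hypothetical; only the
// ordering logic mirrors task_per_work and core_per_work.
#include <iostream>
#include <set>
#include <string>

struct Job
{
	std::string name;
	double workload;
	Job(const std::string &n, double w) : name(n), workload(w) {}
};

// Heavier jobs first, job name as a deterministic tie-break (cf. task_per_work)
struct job_per_work
{
	bool operator()(const Job &a, const Job &b) const
	{
		if(a.workload != b.workload) return b.workload < a.workload;
		return a.name < b.name;
	}
};

struct CoreLoad
{
	int id;
	double finish; // completion time of the last job mapped to this core
	CoreLoad(int i, double f) : id(i), finish(f) {}
};

// Least-loaded core first (cf. core_per_work)
struct core_per_load
{
	bool operator()(const CoreLoad &a, const CoreLoad &b) const
	{
		return a.finish < b.finish;
	}
};

int main()
{
	std::set<Job, job_per_work> jobs;
	jobs.insert(Job("fft", 7));
	jobs.insert(Job("filter", 5));
	jobs.insert(Job("mix", 5));
	jobs.insert(Job("source", 3));
	jobs.insert(Job("sink", 2));

	// One empty "sequence" per core, kept sorted by completion time
	std::multiset<CoreLoad, core_per_load> cores;
	for(int id = 0; id < 3; id++)
	{
		cores.insert(CoreLoad(id, 0));
	}

	// LPT: visit jobs by decreasing workload and always extend the core that finishes earliest
	for(std::set<Job, job_per_work>::const_iterator j = jobs.begin(); j != jobs.end(); ++j)
	{
		CoreLoad c = *cores.begin();
		cores.erase(cores.begin());
		std::cout << j->name << " -> core " << c.id << " at t=" << c.finish << std::endl;
		c.finish += j->workload;
		cores.insert(c);
	}

	return 0;
}

Erasing the cheapest sequence and re-inserting it after appending a task keeps each greedy choice logarithmic in the number of cores, which is why the scheduler above stores its per-core sequences in a multiset ordered by completion time instead of scanning a plain array on every task.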