GreenCloud Simulator
resourceprovider.cc
/*
 * resourceprovider.cc
 *
 * @date Mar 11, 2013
 * @author Mateusz Guzek
 */

#include "resourceprovider.h"
#include "vm.h"
#include "tskcomsink.h"
#include "dchost.h"

double ResourceProvider::MTU = 1500.0;
double ResourceProvider::useful_bytes = 1460.0;
double ResourceProvider::uplink_overhead = ResourceProvider::MTU/ResourceProvider::useful_bytes; // accounts for the TCP/IP headers of uplink traffic
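/* Worked example: with MTU = 1500 B and useful_bytes = 1460 B of payload per
 * segment (40 B of TCP/IP headers), uplink_overhead = 1500/1460 ~ 1.0274, so
 * every uplink payload byte is charged about 2.7% extra for protocol headers. */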

ResourceProvider::ResourceProvider() : id_(0), ntasks_(0),
    currentLoad_(0.0), currentLoadMem_(0.0), currentLoadStor_(0.0), currentLoadNet_(0.0),
    eDVFS_enabled_(0), tskFailed_(0), tskComAgent(NULL), host(NULL), started_(false)
{
    for(int i = 0; i <= LastResType; i++){
        resource_list.push_back(std::vector <DcResource*>());
    }
    for(int i = 0; i <= LastResType; i++){
        resource_utilization[i] = 0.0;
    }
    hosted_vms_.clear();
    poagent_ = NULL;
}

ResourceProvider::~ResourceProvider(){
    std::vector <std::vector<DcResource*> >::iterator iter;
    for(iter = resource_list.begin(); iter != resource_list.end(); iter++){
        std::vector<DcResource*>::iterator iter2;
        for(iter2 = iter->begin(); iter2 != iter->end(); iter2++){
            delete (*iter2);
        }
    }
    delete poagent_;
}

int ResourceProvider::tryToAllocate(ResourceConsumer* rc){
    std::vector <ResDemand*>::iterator u_res;

    // TODO (possible) 1. Sort the provider resources according to the free capacity (descending).
    // TODO (possible) 2. Sort the consumer demands the same way.

    /*For each resource demand of the consumer:*/
    for (u_res = rc->res_demands.begin(); u_res != rc->res_demands.end(); u_res++)
    {
        /*For dynamic consumers (e.g. tasks) do not reserve computing and networking resources.
         * For non-dynamic consumers (e.g. VMs): reserve computing and networking. */
        if(((*u_res)->getType()!=Computing && (*u_res)->getType()!=Networking) || rc->isTask == false){
            bool possible = false;

            std::vector <Capacity>::iterator req_cap_cons = (*u_res)->capacity.begin();
            std::vector <Capacity *>::iterator loc_cap_cons = (*u_res)->capacity_location.begin();
            std::vector <CoreScheduler*>::iterator u_core;
            if((*u_res)->getType()==Computing && (*u_res)->supported_virtual_resource){
                u_core = ((CPU*)((*u_res)->supported_virtual_resource))->cores_schedulers_.begin();
            }

            std::vector <DcResource*>::iterator p_res;
            std::vector <CoreScheduler*>::iterator p_core;
            /*For each DcResource of the provider:*/
            for(p_res = resource_list[(*u_res)->getType()].begin(); p_res != resource_list[(*u_res)->getType()].end(); p_res++){

                /*Check the architecture constraint.*/
                if((*u_res)->getArch() <= (*p_res)->getArch()){

                    /*Case of an empty capacity vector - only the arch constraint applies.*/
                    if((*u_res)->capacity.empty() == true){
                        possible = true;
                    }

                    std::vector <Capacity>::iterator aval_cap_prov = (*p_res)->capacity.begin();
                    if((*u_res)->getType()==Computing && (*u_res)->supported_virtual_resource){
                        p_core = ((CPU*)(*p_res))->cores_schedulers_.begin();
                    }

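                    /*Greedy first-fit matching: subtract each requested capacity from the
                     * first provider capacity slot that can hold it. The chosen slot is
                     * remembered in capacity_location, so releaseAllocation() can later
                     * return the capacity to exactly the same slot.*/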
                    for(; aval_cap_prov != (*p_res)->capacity.end(); ){
                        if((*aval_cap_prov) >= (*req_cap_cons)){
                            (*aval_cap_prov) -= (*req_cap_cons);
                            /*Demand capacities are linked with the supported resource capacities (1 to 1),
                             * so the intermediary ResDemand is neglected:*/
                            if(rc->isVM){
                                (*aval_cap_prov).virtual_capacities.push_back(req_cap_cons->virtual_capacities.at(0));
                            }
                            (*loc_cap_cons) = &(*aval_cap_prov);
                            if((*u_res)->getType()==Computing && (*u_res)->supported_virtual_resource){
                                (*p_core)->addVcoreScheduler((*u_core));
                            }
                            req_cap_cons++;
                            loc_cap_cons++;
                            if(req_cap_cons == (*u_res)->capacity.end()){
                                possible = true;
                                break;
                            }
                            if((*u_res)->getType()==Computing && (*u_res)->supported_virtual_resource){
                                u_core++;
                            }
                        } else {
                            aval_cap_prov++;
                            if((*u_res)->getType()==Computing && (*u_res)->supported_virtual_resource){
                                p_core++;
                            }
                        }
                    }

                }
                /*Resource architecture rejected:*/
                else {
                    //std::cerr << "Arch, Requested: " << (*u_res)->getArch() << "\tProvided: " << (*p_res)->getArch() << "\n";
                }
                if(possible == true){
                    /*Break the main loop of scanning the provider resources.*/
                    break;
                }
            }
            if(possible == false){
                //std::cerr << "Impossible to allocate here.\n";
                releaseAllocation(rc);
                return false;
            }
        }
    }
    //std::cerr << "Allocation success.\n";
    return true;
}

bool ResourceProvider::releaseAllocation(ResourceConsumer* rc){
    if((*rc).res_demands.empty()){
        std::cerr << "Nothing to release \n";
        return true;
    } // std::cerr << "Something to release \n";

    std::vector <ResDemand*>::iterator u_res;
    for (u_res = rc->res_demands.begin(); u_res != rc->res_demands.end(); u_res++)
    {
        if(((*u_res)->getType()!=Computing && (*u_res)->getType()!=Networking) || rc->isTask == false){
            std::vector <Capacity>::iterator consumption;
            std::vector <Capacity *>::iterator location;
            for(consumption = (*u_res)->capacity.begin(),
                location = (*u_res)->capacity_location.begin();
                consumption != (*u_res)->capacity.end();
                consumption++, location++){
                /*Return the consumed capacity to the slot it was taken from.*/
                if((*location) != NULL){
                    **location = (**location) + (*consumption);
                    *location = NULL;
                }
            }
        }
        if(rc->isTask == false){
            if((*u_res)->supported_virtual_resource != NULL){
                if((*u_res)->supported_virtual_resource->getType()==Computing){
                    CPU* cpu = (CPU*)(*u_res)->supported_virtual_resource;
                    std::vector<CoreScheduler*>::iterator cs;
                    for(cs = cpu->cores_schedulers_.begin(); cs != cpu->cores_schedulers_.end(); cs++){
                        CoreScheduler* host_cs = (*cs)->getHostScheduler();
                        if(host_cs != NULL){
                            host_cs->removeVcoreScheduler(*cs);
                        }
                    }
                }
            }
        }
    }
    if(rc->isTask == false){
        // Non-task (VM or migration) specific cleanup is handled in the respective classes.
    }

    return true;
}

ResourceProvider* ResourceProvider::getHost(){
    return host;
}

DcHost* ResourceProvider::getRootHost(){
    if(host == NULL){
        DcHost* root = static_cast<DcHost*>(this);
        return root;
    } else {
        return host->getRootHost();
    }
}

bool ResourceProvider::addVM(VM* newVm){
    if(tryToAllocate(newVm)){
        newVm->setHost(this);
        hosted_vms_.push_back(newVm);
        return true;
    } else {
        return false;
    }
}

bool ResourceProvider::removeVM(VM* vm){
    if(releaseAllocation(vm)){
        vm->setHost(NULL);
        hosted_vms_.erase(remove(hosted_vms_.begin(), hosted_vms_.end(), vm),
                hosted_vms_.end()); /*erase-remove idiom*/
        return true;
    } else {
        return false;
    }
}

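/* recv(): entry point for consumers arriving at this provider. For a task:
 * count it, schedule it if it has no schedule yet, reserve its resources,
 * and start its execution on the core schedulers of the provider's CPUs. */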
void ResourceProvider::recv(ResourceConsumer* rcobj)
{
    if(rcobj->isTask == true){
        CloudTask* tskobj = (CloudTask*) rcobj;

        ntasks_++; // update the total number of received tasks

        if(tskobj->scheduled_ == false){
            if(trySchedulingTsk(tskobj) == false){
                tskobj->fail(this);
//                std::cout << "Unscheduled task failed due to insufficient resources";
                return;
            }
        }
        std::vector <CoreScheduler*>::iterator core_s;
        std::vector <DcResource*>::iterator cpu_iter;

        /*If it is possible to allocate:*/
        if(tryToAllocate(tskobj)){
            for(cpu_iter = resource_list[Computing].begin(); cpu_iter != resource_list[Computing].end(); cpu_iter++){
                CPU* cpu = (CPU*) (*cpu_iter);
                for(core_s = cpu->cores_schedulers_.begin(); core_s != cpu->cores_schedulers_.end(); core_s++){
                    (*core_s)->startTaskExecution(tskobj);
                }
            }
        /*Otherwise the task fails!*/
        } else {
            tskobj->fail(this);
            std::cout << "Task failed due to insufficient resources";
            return;
        }

    } else {
        std::cerr << "It is not a task!";
        return;
    }
}

void ResourceProvider::nextEvent(double delay)
{
    if (status_ == EVENT_PENDING) {
        _cancel();
    }

    event_.handler_ = this;
    event_.time_ = Scheduler::instance().clock();

    _sched(delay);
}

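/* Timer callback fired by the ns-2 scheduler: ask every core scheduler of
 * every CPU to re-evaluate its task list (see CoreScheduler::updateTskList()). */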
void ResourceProvider::handle(Event* event)
{
    std::vector <CoreScheduler*>::iterator core_s;
    std::vector <DcResource*>::iterator cpu_iter;
    for(cpu_iter = resource_list[Computing].begin(); cpu_iter != resource_list[Computing].end(); cpu_iter++){
        CPU* cpu = (CPU*) (*cpu_iter);
        for(core_s = cpu->cores_schedulers_.begin(); core_s != cpu->cores_schedulers_.end(); core_s++){
            (*core_s)->updateTskList();
        }
    }
}

double ResourceProvider::getFreeCapRecursive(res_type type){
    double free_cap = getFreeCap(type);

    std::vector <ResourceConsumer*>::iterator vm_iter;
    for(vm_iter = hosted_vms_.begin(); vm_iter != hosted_vms_.end(); vm_iter++){
        VM* vm = static_cast<VM*>(*vm_iter);
        free_cap += vm->getFreeCapRecursive(type);
    }
    return free_cap;
}

double ResourceProvider::getFreeCap(res_type type){
    double free_cap = 0;
    std::vector <DcResource*>::iterator dc_res;
    for(dc_res = resource_list[type].begin(); dc_res != resource_list[type].end(); dc_res++){
        std::vector <Capacity>::iterator free_cap_iter;
        for(free_cap_iter = (*dc_res)->capacity.begin();
            free_cap_iter != (*dc_res)->capacity.end();
            free_cap_iter++){
            free_cap += *free_cap_iter;
        }
    }

    return free_cap;
}

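/* getUsedNet(): average link utilization (bytes per second) since the byte
 * counters were last reset. Downlink sums the task sink and all VM migration
 * sinks; uplink sums the provider's outgoing agents and is inflated by
 * uplink_overhead to account for TCP/IP headers. */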
double ResourceProvider::getUsedNet(bool in, bool out){
    double result = 0;
    if(in){
        double elapsed_time = Scheduler::instance().clock() - this->tskComSink_->getLastBytesSinceTime();
        if(elapsed_time > 0){
            double down_link_util = this->tskComSink_->resetBytesSince();
            std::vector<VmMigrationSink*>::iterator vms;
            for(vms = vm_migration_sinks_.begin(); vms != vm_migration_sinks_.end(); vms++){
                double recent_bytes = (*vms)->resetBytesSince();
                down_link_util += recent_bytes;
            }
            result += (down_link_util/elapsed_time);
        }
    }
    if(out){
        double elapsed_time = Scheduler::instance().clock() - this->poagent_->updateTime();
        if(elapsed_time > 0){
            double up_link_util = this->poagent_->updateAgentDataBytes();
            std::vector<ProviderOutAgent*>::iterator poa;
            for(poa = vm_migration_sources_.begin(); poa != vm_migration_sources_.end(); poa++){
                double recent_bytes = (*poa)->updateAgentDataBytes();
                up_link_util += recent_bytes;
            }
            result += ((up_link_util*ResourceProvider::uplink_overhead)/elapsed_time);
        }
    }

    return result;
}

double ResourceProvider::getUsedNetRecursive(bool in, bool out){
    double used_net = getUsedNet(in, out);
    std::vector <ResourceConsumer*>::iterator vm_iter;
    for(vm_iter = hosted_vms_.begin(); vm_iter != hosted_vms_.end(); vm_iter++){
        VM* vm = static_cast<VM*>(*vm_iter);
        used_net += vm->getUsedNetRecursive(in, out);
    }

    return used_net;
}

double ResourceProvider::getTotalCap(res_type type){
    double total_cap = 0;
    std::vector <DcResource*>::iterator dc_res;
    for(dc_res = resource_list[type].begin(); dc_res != resource_list[type].end(); dc_res++){
        std::vector <Capacity>::iterator total_cap_iter;
        for(total_cap_iter = (*dc_res)->specification->capacity.begin();
            total_cap_iter != (*dc_res)->specification->capacity.end();
            total_cap_iter++){
            total_cap += *total_cap_iter;
        }
    }
    return total_cap;
}

double ResourceProvider::getResTypeUtil(res_type type){
    if(type==Networking){
        /*The networking load is refreshed by updateResTypeUtil() elsewhere;
         * return the cached value (body reconstructed from context).*/
        return currentLoadNet_;
    } else {
        return updateResTypeUtil(type);
    }
}

double ResourceProvider::updateResTypeUtil(res_type type){
    if(type==Computing){
        /*MIPS-based computing load; updateLoadCom() is a reconstructed name,
         * see the definition below.*/
        resource_utilization[type] = updateLoadCom();
        return resource_utilization[type];
    } else if(type == Networking){
        double total_cap = getTotalCap(type) * 2; // Bidirectional links
        double used_net_bytes = getUsedNetRecursive(true, true);
        double result = used_net_bytes/total_cap;
        currentLoadNet_ = result;
        return result;
    } else {
        double total_cap = getTotalCap(type);
        if(total_cap == 0){
            return 0; // There are no components of this resource type
        }
        double free_cap = getFreeCapRecursive(type);
        double result = 1 - (free_cap/total_cap);
        if(type == Memory){
            currentLoadMem_ = result;
        } else if(type == Storage){
            currentLoadStor_ = result;
        }
        resource_utilization[type] = result;
        return result;
    }
}

/* MIPS-weighted CPU load across all computing resources. (The original method
 * name was not recoverable; updateLoadCom is a reconstruction consistent with
 * its use in updateResTypeUtil() above.) */
double ResourceProvider::updateLoadCom()
{
    double nominal_mips = 0;
    double current_mips = 0;
    std::vector <DcResource*>::iterator cpu_iter;
    for(cpu_iter = resource_list[Computing].begin(); cpu_iter != resource_list[Computing].end(); cpu_iter++){
        DcResource* res = *cpu_iter;
        CPU* cpu = (CPU*) res;
        nominal_mips += cpu->getNominalMIPS();
        current_mips += cpu->getCurrentMIPS();
    }
    currentLoad_ = current_mips/nominal_mips;

    return currentLoad_;
}

int ResourceProvider::testSchedulingPossibility(CloudTask* tskobj){
    int result = trySchedulingTsk(tskobj);
    if(result){
        releaseAllocation(tskobj);
        tskobj->releaseAllTaskAllocs();
        tskobj->scheduled_ = false;
    }
    return result;
}

int ResourceProvider::trySchedulingTsk(CloudTask* tskobj)
{
    /* First check that the task's resource demands can be allocated at all. */
    if(tryToAllocate(tskobj)){
        releaseAllocation(tskobj);
    } else {
        return false;
    }

    std::vector<TaskAlloc*> tmp_task_allocs;
    tmp_task_allocs.clear();

    std::vector<TaskAlloc*>::iterator iter;
    for(iter = tskobj->task_allocations_.begin(); iter != tskobj->task_allocations_.end(); iter++){
        TaskAlloc* task_alloc = (*iter);
        bool core_found = false;
        tmp_task_allocs.push_back(task_alloc);

        /* Minimum processing rate required to finish the task by its deadline. */
        double tskrate = (double)task_alloc->getMIPS()/(task_alloc->getDeadline() - Scheduler::instance().clock());

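        /* Admission test: a core accepts the task if the most demanding rate
         * among its tasks (including this one), multiplied by the task count
         * after insertion, still fits within the core's available MIPS. This
         * conservatively assumes every task may require the highest rate. */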
        std::vector <CoreScheduler*>::iterator core_s;
        std::vector <DcResource*>::iterator cpu_iter;
        for(cpu_iter = resource_list[Computing].begin(); cpu_iter != resource_list[Computing].end(); cpu_iter++){
            CPU* cpu = (CPU*) (*cpu_iter);
            for(core_s = cpu->cores_schedulers_.begin(); core_s != cpu->cores_schedulers_.end(); core_s++){

                double maxrate = (*core_s)->getMostUrgentTaskRate();
                if (tskrate > maxrate){ maxrate = tskrate; }
                if (maxrate*((*core_s)->getAllTasksNumber() + 1) <= (*core_s)->getAvailableMIPS()){
                    /* The task can be scheduled: add it to the in-fly list. */
                    (*core_s)->assignTask(task_alloc);
                    core_found = true;
                    break;
                }
            }
            if(core_found){ break; }
        }
        if(core_found == false){
            tskobj->releaseAllTaskAllocs();
            // Release all tmp_task_allocs:
//            std::vector<TaskAlloc*>::iterator failed_alloc;
//            for(failed_alloc = tmp_task_allocs.begin(); failed_alloc != tmp_task_allocs.end(); failed_alloc++){
//                CoreScheduler* core_of_failed = (*failed_alloc)->getCoreScheduler();
//                if(core_of_failed != NULL){
//                    core_of_failed->removeFromAssginedList((*failed_alloc));
//                }
//            }
            return false;
        }
    }
    tskobj->scheduled_ = true;
    return true;
}

void ResourceProvider::addResource(DcResource* res){
    resource_list[res->getType()].push_back(res);
    if(res->getType()==Computing){
        CPU* cpu_res = static_cast<CPU*>(res);
        cpu_res->setDVFS(eDVFS_enabled_);
        cpu_res->setProvider(this);
    }
    if(res->getType()==Networking){
        NIC* nic_res = static_cast<NIC*>(res);
        nic_res->setRp(this);
    }
}

void ResourceProvider::setTskComSink(TskComSink* tcs){
    this->tskComSink_ = tcs;
}

inline void ResourceProvider::_sched(double delay) {
    (void)Scheduler::instance().schedule(this, &event_, delay);
}

inline void ResourceProvider::_cancel() {
    (void)Scheduler::instance().cancel(&event_);
    // No need to free event_ since it is statically allocated.
}

void ResourceProvider::printTasklist(){
    std::cout << "printTasklist Status: (FUNCTION UNDER CONSTRUCTION)\n";
}

void ResourceProvider::setAgent(ProviderOutAgent* agent){
    poagent_ = agent;
}

TcpAgent* ResourceProvider::getAgent(){
    return poagent_;
}

void ResourceProvider::setTskComAgent(TskComAgent* agnt){
    this->tskComAgent = agnt;
}

TskComAgent* ResourceProvider::getTskComAgent(){
    return this->tskComAgent;
}

void ResourceProvider::sendTaskOutput(CloudTask* task){
    if ((getAgent()) && (task->getOutput() != 0)) {
        /*Record the finish time of the task on the server.*/
        task->info_->setServerFinishTime(Scheduler::instance().clock());
        task->info_->setResourceProvider(this);
        /*Send the task output.*/
        poagent_->sendmsg(task->getOutput(), task);
    }
}

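/* DBL_MAX acts as a sentinel meaning "no pending deadline": in that case no
 * timer event is scheduled. */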
void ResourceProvider::scheduleNextExent(double nextDeadline){
    /* Reschedule the next update. */
    if (nextDeadline != DBL_MAX) nextEvent(nextDeadline);
}

void ResourceProvider::attachSink(VmMigrationSink* vm_mig_sink){
    vm_migration_sinks_.push_back(vm_mig_sink);
}

void ResourceProvider::attachSource(ProviderOutAgent* poa){
    vm_migration_sources_.push_back(poa);
}

void ResourceProvider::detachSink(VmMigrationSink* vm_mig_sink){
    vm_migration_sinks_.erase(remove(vm_migration_sinks_.begin(), vm_migration_sinks_.end(), vm_mig_sink),
            vm_migration_sinks_.end()); /*erase-remove idiom*/
}

void ResourceProvider::detachSource(ProviderOutAgent* poa){
    vm_migration_sources_.erase(remove(vm_migration_sources_.begin(), vm_migration_sources_.end(), poa),
            vm_migration_sources_.end()); /*erase-remove idiom*/
}

int ResourceProvider::command(int argc, const char*const* argv)
{
    Tcl& tcl = Tcl::instance();

    if (argc == 2) {
        return (TCL_ERROR);

    } else if (argc == 3) {
        if (strcmp(argv[1], "attach-agent") == 0) {
            setAgent((ProviderOutAgent*) TclObject::lookup(argv[2]));
            if (getAgent() == 0) {
                tcl.resultf("no such agent %s", argv[2]);
                return (TCL_ERROR);
            }
            return (TCL_OK);
        }
        else if (strcmp(argv[1], "set-taskcomagent") == 0) {
            TskComAgent* agnt = dynamic_cast<TskComAgent*>(TclObject::lookup(argv[2]));
            if(agnt){
                setTskComAgent(agnt);
                return (TCL_OK);
            }
            return (TCL_ERROR);
        }
        else if (strcmp(argv[1], "attach-vm-mig-sink") == 0) {
            VmMigrationSink* vm_mig_sink = (VmMigrationSink*) TclObject::lookup(argv[2]);
            if (vm_mig_sink == NULL) {
                tcl.resultf("no such sink %s", argv[2]);
                return (TCL_ERROR);
            }
            attachSink(vm_mig_sink);
            return (TCL_OK);
        } else if (strcmp(argv[1], "attach-vm-mig-source") == 0) {
            ProviderOutAgent* vm_migration_source = (ProviderOutAgent*) TclObject::lookup(argv[2]);
            if (vm_migration_source == NULL) {
                tcl.resultf("no such agent %s", argv[2]);
                return (TCL_ERROR);
            }
            attachSource(vm_migration_source);
            return (TCL_OK);
        } else if (strcmp(argv[1], "detach-vm-mig-sink") == 0) {
            VmMigrationSink* vm_mig_sink = (VmMigrationSink*) TclObject::lookup(argv[2]);
            if (vm_mig_sink == NULL) {
                tcl.resultf("no such sink %s", argv[2]);
                return (TCL_ERROR);
            }
            detachSink(vm_mig_sink);
            return (TCL_OK);
        } else if (strcmp(argv[1], "detach-vm-mig-source") == 0) {
            ProviderOutAgent* vm_migration_source = (ProviderOutAgent*) TclObject::lookup(argv[2]);
            if (vm_migration_source == NULL) {
                tcl.resultf("no such agent %s", argv[2]);
                return (TCL_ERROR);
            }
            detachSource(vm_migration_source);
            return (TCL_OK);
        } else if (strcmp(argv[1], "add-resource") == 0) {
            DcResource* res = (DcResource*) TclObject::lookup(argv[2]);
            if (res == NULL) {
                tcl.resultf("no such resource %s", argv[2]);
                return (TCL_ERROR);
            }
            addResource(res);
            return (TCL_OK);
        } else if (strcmp(argv[1], "add-vm") == 0) {
            VM* vm = (VM*) TclObject::lookup(argv[2]);
            if (vm == NULL) {
                tcl.resultf("no such vm %s", argv[2]);
                return (TCL_ERROR);
            }
            if(addVM(vm)){
                return (TCL_OK);
            } else {
                /* It was impossible to allocate the VM on this host. */
                std::cerr << "ERROR: Could not allocate a VM: the host does not have enough resources. (Creation was called from Tcl)";
                return (TCL_ERROR);
            }
        }
    }
    return (TCL_ERROR);
}
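
/* Example OTcl usage (hypothetical object names; the command verbs match the
 * handlers above):
 *
 *   $provider add-resource $cpu
 *   $provider attach-agent $out_agent
 *   $provider set-taskcomagent $com_agent
 *   $provider add-vm $vm
 */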