Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
Lucas Cavalcante
cloudsimplus
Commits
f0f23899
Commit
f0f23899
authored
May 25, 2018
by
Eduardo Falcão
Browse files
implementing provisioner for shared resources
parent
ee13df37
Changes
1
Hide whitespace changes
Inline
Side-by-side
cloudsim-plus/src/main/java/org/cloudbus/cloudsim/provisioners/ResourceProvisionerShared.java
View file @
f0f23899
...
...
@@ -8,13 +8,16 @@
package
org.cloudbus.cloudsim.provisioners
;
import
org.apache.commons.lang3.tuple.Triple
;
import
org.cloudbus.cloudsim.resources.Pe
;
import
org.cloudbus.cloudsim.vms.Vm
;
import
org.cloudbus.cloudsim.resources.ResourceManageable
;
import
java.util.ArrayList
;
import
java.util.Collections
;
import
java.util.HashMap
;
import
java.util.Iterator
;
import
java.util.LinkedList
;
import
java.util.List
;
import
java.util.Map
;
import
java.util.TreeMap
;
...
...
@@ -118,86 +121,127 @@ public class ResourceProvisionerShared extends ResourceProvisionerAbstract {
return
0
;
}
class
VmRequestedAllocated
implements
Comparable
<
VmRequestedAllocated
>{
private
Vm
vm
;
private
Long
requested
,
allocated
;
public
VmRequestedAllocated
(
Vm
vm
,
Long
requested
,
Long
allocated
)
{
super
();
this
.
vm
=
vm
;
this
.
requested
=
requested
;
this
.
allocated
=
allocated
;
}
@Override
public
boolean
equals
(
Object
obj
)
{
return
vm
.
equals
(
obj
);
}
public
int
compareTo
(
VmRequestedAllocated
vra
)
{
if
(
this
.
requested
<
vra
.
getRequested
())
{
return
-
1
;
}
if
(
this
.
requested
>
vra
.
getRequested
())
{
return
1
;
}
return
0
;
}
public
Vm
getVm
()
{
return
vm
;
}
public
void
setVm
(
Vm
vm
)
{
this
.
vm
=
vm
;
}
public
Long
getRequested
()
{
return
requested
;
}
public
void
setRequested
(
Long
requested
)
{
this
.
requested
=
requested
;
}
public
void
subtractRequested
(
Long
requested
)
{
this
.
requested
-=
requested
;
}
public
Long
getAllocated
()
{
return
allocated
;
}
public
void
setAllocated
(
Long
allocated
)
{
this
.
allocated
=
allocated
;
}
public
void
addAllocated
(
Long
extraAllocation
)
{
this
.
allocated
+=
extraAllocation
;
}
}
private
void
allocateResourcesEqually
(
final
Vm
vm
,
final
long
newTotalVmResourceCapacity
){
Map
<
Long
,
List
<
Vm
>>
requestedPerVm
=
new
TreeMap
<
Long
,
List
<
Vm
>>();
Map
<
Vm
,
Long
>
resourcesToBeAllocated
=
new
HashMap
<
Vm
,
Long
>();
//the final allocation map
List
<
VmRequestedAllocated
>
finalList
=
new
LinkedList
<
VmRequestedAllocated
>();
List
<
VmRequestedAllocated
>
vmRequestedAndAllocatedList
=
new
LinkedList
<
VmRequestedAllocated
>();
for
(
Entry
<
Vm
,
Long
>
entry
:
getResourceAllocationMap
().
entrySet
()){
vmRequestedAndAllocatedList
.
add
(
new
VmRequestedAllocated
(
entry
.
getKey
(),
entry
.
getValue
(),
0L
));
}
for
(
Entry
<
Vm
,
Long
>
entry
:
getResourceAllocationMap
().
entrySet
()){
resourcesToBeAllocated
.
put
(
entry
.
getKey
(),
0L
);
//initially the resourcesToBeAllocated of each VM is 0
if
(!
requestedPerVm
.
containsKey
(
entry
.
getValue
())){
requestedPerVm
.
put
(
entry
.
getValue
(),
new
ArrayList
<
Vm
>());
}
requestedPerVm
.
get
(
entry
.
getValue
()).
add
(
entry
.
getKey
());
//fulfill the requestedPerVm map with values from resourceAllocationMap
if
(
vmRequestedAndAllocatedList
.
contains
(
vm
)){
vmRequestedAndAllocatedList
.
get
(
vmRequestedAndAllocatedList
.
indexOf
(
vm
)).
setRequested
(
newTotalVmResourceCapacity
);
}
else
{
vmRequestedAndAllocatedList
.
add
(
new
VmRequestedAllocated
(
vm
,
newTotalVmResourceCapacity
,
0L
));
}
long
capacity
=
getResource
().
getCapacity
();
long
allocated
=
0L
;
long
available
=
capacity
;
int
numVms
=
resourcesToBeAllocated
.
size
();
int
numVms
=
vmRequestedAndAllocatedList
.
size
();
Collections
.
sort
(
vmRequestedAndAllocatedList
);
/**
* here we iterate over
r
equested
PerVm
map distributing the available resources equally
* here we iterate over
vmR
equested
AndAllocatedList
map distributing the available resources equally
*/
Iterator
<
Map
.
Entry
<
Long
,
List
<
Vm
>>>
it
=
requestedPerVm
.
entrySet
().
iterator
();
while
(
it
.
hasNext
())
{
Map
.
Entry
<
Long
,
List
<
Vm
>>
entry
=
it
.
next
();
long
requested
=
entry
.
getKey
();
//gets the minimum requested value and
if
(
requested
*
numVms
<
available
){
//then allocates it to each vm (if possible)
for
(
Map
.
Entry
<
Vm
,
Long
>
entryAllocation
:
resourcesToBeAllocated
.
entrySet
())
{
entryAllocation
.
setValue
(
entryAllocation
.
getValue
()+
requested
);
//increase the current allocation by the requested value
Iterator
<
Map
.
Entry
<
Long
,
List
<
Vm
>>>
it2
=
requestedPerVm
.
entrySet
().
iterator
();
//and then subtract this value in requestedPerVm for all occurrences
while
(
it2
.
hasNext
())
{
//as long as we have a map we need to use an iterator to remove
Map
.
Entry
<
Long
,
List
<
Vm
>>
entry2
=
it2
.
next
();
long
entry2requested
=
entry2
.
getKey
();
if
(
entry2requested
-
requested
==
0
){
//if this current allocation fulfills the requirements, remove from requestedPerVm
it2
.
remove
();
}
else
{
if
(!
requestedPerVm
.
containsKey
(
entry2requested
-
requested
)){
//if there is no other VM with this amount of request, then just put this new value in map
requestedPerVm
.
put
(
entry2requested
-
requested
,
entry2
.
getValue
());
}
else
{
}
it2
.
remove
();
}
}
}
allocated
+=
requested
*
numVms
;
available
=
capacity
-
allocated
;
it
.
remove
();
Iterator
<
VmRequestedAllocated
>
it
=
vmRequestedAndAllocatedList
.
listIterator
();
while
(
it
.
hasNext
()
&&
available
>
0
)
{
VmRequestedAllocated
vra
=
it
.
next
();
long
toBeAllocated
=
Math
.
min
(
vra
.
getRequested
(),
available
);
//gets the minimum between minimum requested value and available resources
if
(
toBeAllocated
*
vmRequestedAndAllocatedList
.
size
()
<
available
){
//then allocates it to each vm (if possible)
Iterator
<
VmRequestedAllocated
>
it2
=
vmRequestedAndAllocatedList
.
listIterator
();
while
(
it2
.
hasNext
())
{
VmRequestedAllocated
vra2
=
it2
.
next
();
vra2
.
addAllocated
(
toBeAllocated
);
//increase the current allocation by the requested value
vra2
.
subtractRequested
(
toBeAllocated
);
//and subtract the current requested by the recently allocated value
if
(
vra2
.
getRequested
()
==
0
){
//if this vm is satisfied
finalList
.
add
(
vra2
);
//add it on final list
it2
.
remove
();
//and remove from the main list
}
allocated
+=
toBeAllocated
;
//updates allocated and available
available
-=
toBeAllocated
;
}
}
else
{
}
}
//basically chooses the vm with less resources requested and give this amount to all Vms
//keep doing it until all you can not allocate resources equally to the VMs
//after this, split the remaining resources and give to th remaininh VMs
// int numberOfVms = getResourceAllocationMap().size();
// if(!getResourceAllocationMap().containsKey(vm))
// numberOfVms++;
//
// long resourcesPerVm = getResource().getCapacity()/numberOfVms;
//
//
//
//
// long resourcesMissingToAllocate = resourcesPerVm - (newTotalVmResourceCapacity - getResource().getAvailableResource());
//
//
// for(Entry<Vm, Long> entry : getResourceAllocationMap().entrySet()){
// if(entry.getValue().longValue() > resourcesPerVm){
// getResource().allocateResource(amountToAllocate)
// }
// }
// getResourceAllocationMap();
// int numberOfVms
// getResourceAllocationMap()
toBeAllocated
=
available
/
vmRequestedAndAllocatedList
.
size
();
Iterator
<
VmRequestedAllocated
>
it2
=
vmRequestedAndAllocatedList
.
listIterator
();
while
(
it2
.
hasNext
())
{
VmRequestedAllocated
vra2
=
it2
.
next
();
vra2
.
addAllocated
(
toBeAllocated
);
//increase the current allocation by the requested value
vra2
.
subtractRequested
(
toBeAllocated
);
//and subtract the current requested by the recently allocated value
finalList
.
add
(
vra2
);
//add it on final list because its the final allocation
it2
.
remove
();
//and remove from the main list
allocated
+=
toBeAllocated
;
//updates allocated and available
available
-=
toBeAllocated
;
}
}
Collections
.
sort
(
vmRequestedAndAllocatedList
);
//is it necessary?
it
=
vmRequestedAndAllocatedList
.
listIterator
();
//update iterator with new state of the list
}
}
@Override
...
...
@@ -216,5 +260,93 @@ public class ResourceProvisionerShared extends ResourceProvisionerAbstract {
final
long
allocationDifference
=
newVmTotalAllocatedResource
-
currentAllocatedResource
;
return
getResource
().
getAvailableResource
()
>=
allocationDifference
;
}
// private void allocateResourcesEquallyBKUP(final Vm vm, final long newTotalVmResourceCapacity){
//
// Map<Long,List<Vm>> requestedPerVm = new TreeMap<Long,List<Vm>>();
// Map<Vm,Long> resourcesToBeAllocated = new HashMap<Vm,Long>(); //the final allocation map
//
// for(Entry<Vm, Long> entry : getResourceAllocationMap().entrySet()){
// resourcesToBeAllocated.put(entry.getKey(), 0L); //initially the resourcesToBeAllocated of each VM is 0
// if(!requestedPerVm.containsKey(entry.getValue())){
// requestedPerVm.put(entry.getValue(), new ArrayList<Vm>());
// }
// requestedPerVm.get(entry.getValue()).add(entry.getKey()); //fulfill the requestedPerVm map with values from resourceAllocationMap
// }
//
// long capacity = getResource().getCapacity();
// long allocated = 0L;
// long available = capacity;
//
// int numVms = resourcesToBeAllocated.size();
//
// /**
// * here we iterate over requestedPerVm map distributing the available resources equally
// */
// Iterator<Map.Entry<Long,List<Vm>>> it = requestedPerVm.entrySet().iterator();
// while (it.hasNext()) {
// Map.Entry<Long,List<Vm>> entry = it.next();
// long requested = entry.getKey(); //gets the minimum requested value and
// if(requested*numVms<available){ //then allocates it to each vm (if possible)
// for (Map.Entry<Vm,Long> entryAllocation : resourcesToBeAllocated.entrySet()) {
// entryAllocation.setValue(entryAllocation.getValue()+requested); //increase the current allocation by the requested value
// Iterator<Map.Entry<Long,List<Vm>>> it2 = requestedPerVm.entrySet().iterator(); //and then subtract this value in requestedPerVm for all occurrences
// while (it2.hasNext()) { //as long as we have a map we need to use an iterator to remove
// Map.Entry<Long,List<Vm>> entry2 = it2.next();
// long entry2requested = entry2.getKey();
// if(entry2requested-requested==0){ //if this current allocation fulfills the requirements
// if(entry2.getValue().size()==1){ //and if there's only one VM with this requested
// it2.remove(); //then, remove this entry from requestedPerVm
// } else{ //otherwise, there is/are more VM(s) requesting this same amount
// Iterator<Vm> it3 = entry2.getValue().listIterator();
// while (it3.hasNext()) {
//
// }
// //entry2.getValue().remove(entryAllocation.getKey());
// }
// } else{
// if(!requestedPerVm.containsKey(entry2requested-requested)){ //if there is no other VM with this amount of request, then just put this new value in map
// requestedPerVm.put(entry2requested-requested, entry2.getValue());
// } else{ //if there is other VM with this amount of request, then add this VM on the list (inside map)
// requestedPerVm.get(entry.getKey()).add(e)
// }
// it2.remove();
// }
// }
// }
// allocated += requested*numVms;
// available = capacity - allocated;
// it.remove();
// } else{
//
// }
// }
//basically chooses the vm with the fewest resources requested and gives that amount to all VMs
//keep doing it until resources can no longer be allocated equally to the VMs
//after this, split the remaining resources and give them to the remaining VMs
// int numberOfVms = getResourceAllocationMap().size();
// if(!getResourceAllocationMap().containsKey(vm))
// numberOfVms++;
//
// long resourcesPerVm = getResource().getCapacity()/numberOfVms;
//
//
//
//
// long resourcesMissingToAllocate = resourcesPerVm - (newTotalVmResourceCapacity - getResource().getAvailableResource());
//
//
// for(Entry<Vm, Long> entry : getResourceAllocationMap().entrySet()){
// if(entry.getValue().longValue() > resourcesPerVm){
// getResource().allocateResource(amountToAllocate)
// }
// }
// getResourceAllocationMap();
// int numberOfVms
// getResourceAllocationMap()
}
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment