forked from Bitmessage/virtpool
added asynchronous handling of libvirt
parent db5b2e96c1
commit 656071fa6e
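The "asynchronous handling of libvirt" this commit describes boils down to pushing the blocking libvirt createXML() call onto a single-worker multiprocessing.pool.ThreadPool and retrieving the result with apply_async()/get(). A minimal sketch of that pattern follows; slow_libvirt_call and its sleep are illustrative stand-ins, not code from this repository, and calling get() right away (as createvm does after this diff) still waits for the worker to finish:

    from multiprocessing.pool import ThreadPool
    import time

    pool = ThreadPool(processes=1)  # one worker thread, as virtpool.py now creates

    def slow_libvirt_call(xml):
        # stand-in for a blocking libvirt call such as host.createXML(xml)
        time.sleep(1)
        return {"status": 1, "xml": xml}

    # apply_async queues the call on the worker thread and returns immediately
    async_result = pool.apply_async(slow_libvirt_call, ("<domain/>",))
    # ... other work could happen here while the worker runs ...
    print(async_result.get())  # blocks until the worker's return value is ready
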
@@ -33,9 +33,10 @@ class RPCClient():
         pass
 
 if __name__ == "__main__":
+    import random
     client= RPCClient()
     print("Creating VM...")
-    name= "CentOS-8-Demo"
+    name= "CentOS-8-Demo-{}-{}".format(random.sample("abcdefghijklmn", 1)[0], random.sample(range(10),1)[0])
     iso_src= "/home/coolguy/Downloads/CentOS-8.2.2004-x86_64-boot.iso"
     img_src= "/var/lib/libvirt/images/vol.img"
 
virtpool.py (101 lines changed)
@@ -7,6 +7,9 @@ import sys
 import cherrypy
 import libvirt
 import json
+import threading
+from multiprocessing.pool import ThreadPool
+pool = ThreadPool(processes=1)
 
 import uuid
 from jinja2 import Environment, FileSystemLoader
@@ -17,6 +20,8 @@ TEMPLATE_ENVIRONMENT = Environment(
     loader=FileSystemLoader(os.path.join(PATH, 'templates')),
     trim_blocks=False)
 
+#TEMPLATE_ENVIRONMENT.globals['STATIC_PREFIX'] = '/templates/'
+
 def render_template(template_filename, context):
     return TEMPLATE_ENVIRONMENT.get_template(template_filename).render(context)
 
@@ -111,14 +116,90 @@ class Client():
 
         host= self.connections[self.uris[0]]
         if host:
-            try:
-                vm= host.createXML(xml)
-                return json.dumps({"uuid":vm.UUIDString(), "id":vm.ID(), "status":1})
-            except Exception as e:
-                return json.dumps({"err":str(e), "status":0})
+            async_result = pool.apply_async(self.__create_vm, (host, xml))
+            return async_result.get()
         return "No host available"
 
 
+    def __create_vm(self, host, xml):
+        try:
+            vm= host.createXML(xml)
+            return json.dumps({"uuid":vm.UUIDString(), "id":vm.ID(), "status":1})
+        except Exception as e:
+            return json.dumps({"err":str(e), "status":0})
+
+    @cherrypy.expose
+    def gethost(self):
+        """
+        It'll show relevant info about all available hosts
+        """
+
+        host_data= []
+
+        for i, each in enumerate(self.uris):
+            tmp= {}
+            c= self.connections[each]
+            nodeinfo = c.getInfo()
+            mem = nodeinfo[1]
+            cpus = nodeinfo[2]
+            usedmem = 0
+            usedcpus = 0
+
+            tmp["sn"]= i+1
+            tmp["hostname"]= c.getHostname()
+            tmp["status"]= "Alive" if c.isAlive() else "Dead"
+            tmp["uri"]= c.getURI()
+            tmp["cpu"]= {}
+            tmp["mem"]= {}
+
+            domain_ids = c.listDomainsID()
+            for did in domain_ids:
+                dom = c.lookupByID(did)
+                state, maxmem, mem2, cpus2, cput = dom.info()
+                usedmem += maxmem/1024
+                usedcpus += cpus2
+
+            tmp["cpu"]["total"]= cpus
+            tmp["cpu"]["free"]= cpus - usedcpus
+            tmp["mem"]["total"]= mem
+            tmp["mem"]["free"]= int(mem - usedmem)
+
+            host_data.append(tmp)
+
+        return render_template("host.html", {"host_data": host_data})
+
+    @cherrypy.expose
+    def getguest(self):
+        """
+        It'll show host-wise available guests and detailed info about them
+        """
+
+        guest_data= []
+        cnt=0
+        for i, each in enumerate(self.uris):
+            c= self.connections[each]
+            for did in c.listDomainsID():
+                dom= c.lookupByID(did)
+                tmp= {
+                    "sn": cnt+1,
+                    "guestname": dom.name(),
+                    "hostname": c.getHostname(),
+                    "id_": dom.ID(),
+                    "mem": int(dom.info()[1]/1024),
+                    "cpu": dom.info()[3],
+                    "status": "Active" if dom.isActive() else "Inactive",
+                    "uuid": dom.UUIDString()
+                }
+
+                guest_data.append(tmp)
+                cnt+=1
+
+        return render_template("guest.html", {"guest_data": guest_data})
+
+
+    def manage_host(self,host_id):
+        pass
+
 ROOT = Client()
 CONF = os.path.join(os.path.dirname(__file__), 'site.conf')
 
@@ -130,7 +211,15 @@ if __name__ == '__main__':
     # cherrypy.process.plugins.Daemonizer(engine).subscribe()
     # cherrypy.process.plugins.DropPrivileges(
     #     ENGINE, uid='cherrypy', gid='cherrypy').subscribe()
-    cherrypy.tree.mount(Client())
+    cherrypy.tree.mount(Client(),
+        '/',
+        config={
+            '/':{
+                'tools.staticdir.on': True,
+                'tools.staticdir.dir': PATH+"/templates"
+            }
+        }
+    )
     if hasattr(ENGINE, "signal_handler"):
         ENGINE.signal_handler.subscribe()
     if hasattr(ENGINE, "console_control_handler"):
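Aside from the ThreadPool change, the last hunk switches cherrypy.tree.mount() to pass a config dict that enables the staticdir tool for the templates directory. Below is a self-contained sketch of that wiring, assuming PATH resolves to an absolute directory (CherryPy's staticdir tool requires an absolute path or a tools.staticdir.root); the Root class and paths are illustrative, not taken from virtpool:

    import os
    import cherrypy

    class Root:
        @cherrypy.expose
        def index(self):
            return "hello"

    if __name__ == "__main__":
        here = os.path.abspath(os.path.dirname(__file__))
        cherrypy.tree.mount(Root(), '/', config={
            '/': {
                # staticdir needs an absolute directory (or a tools.staticdir.root)
                'tools.staticdir.on': True,
                'tools.staticdir.dir': os.path.join(here, 'templates'),
            }
        })
        cherrypy.engine.start()
        cherrypy.engine.block()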