[virt-tools-list] [PATCH] Use storage pool lifecycle events
Cole Robinson
crobinso at redhat.com
Tue Jun 14 11:21:03 UTC 2016
On 06/13/2016 02:25 PM, Jovanka Gulicoska wrote:
> ---
> virtManager/connection.py | 35 +++++++++++++++++++++++++++++++++++
> virtManager/storagelist.py | 5 +----
> virtManager/storagepool.py | 5 ++++-
> 3 files changed, 40 insertions(+), 5 deletions(-)
>
> diff --git a/virtManager/connection.py b/virtManager/connection.py
> index 6cb9cc0..98b4ce1 100644
> --- a/virtManager/connection.py
> +++ b/virtManager/connection.py
> @@ -213,6 +213,8 @@ class vmmConnection(vmmGObject):
> self._domain_cb_ids = []
> self.using_network_events = False
> self._network_cb_ids = []
> + self.using_storage_pool_events = False
> + self._storage_pool_cb_ids = []
>
> self._xml_flags = {}
>
> @@ -774,6 +776,20 @@ class vmmConnection(vmmGObject):
> else:
> self.schedule_priority_tick(pollnet=True, force=True)
>
> + def _storage_pool_lifecycle_event(self, conn, pool, event, reason, userdata):
This line is long, can you break it after "pool,"?
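Something like this, just to illustrate the wrapping (untested, adjust the
continuation indent to taste):

    def _storage_pool_lifecycle_event(self, conn, pool,
                                      event, reason, userdata):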
> + ignore = conn
> + ignore = userdata
> +
> + name = pool.name()
> + logging.debug("storage pool lifecycle event: storage=%s event=%s "
> + "reason=%s", name, event, reason)
> + obj = self.get_pool(name)
> +
> + if obj:
> + self.idle_add(obj.refresh_from_event_loop)
> + else:
> + self.schedule_priority_tick(pollpool=True, force=True)
> +
> def _add_conn_events(self):
> if not self.check_support(support.SUPPORT_CONN_WORKING_XEN_EVENTS):
> return
> @@ -829,6 +845,20 @@ class vmmConnection(vmmGObject):
> self.using_network_events = False
> logging.debug("Error registering network events: %s", e)
>
> + try:
> + if FORCE_DISABLE_EVENTS:
> + raise RuntimeError("FORCE_DISABLE_EVENTS = True")
> +
> + eventid = getattr(libvirt, "VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE", 0)
This one is also long, please break it after "libvirt,".
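For example (again just a sketch of the line break, not tested):

    eventid = getattr(libvirt,
        "VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE", 0)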
> + self._storage_pool_cb_ids.append(
> + self.get_backend().storagePoolEventRegisterAny(
> + None, eventid, self._storage_pool_lifecycle_event, None))
> + self.using_storage_pool_events = True
> + logging.debug("Using storage pool events")
> + except Exception, e:
> + self.using_storage_pool_events = False
> + logging.debug("Error registering storage pool events: %s", e)
> +
>
> ######################################
> # Connection closing/opening methods #
> @@ -849,12 +879,15 @@ class vmmConnection(vmmGObject):
> self._backend.domainEventDeregisterAny(eid)
> for eid in self._network_cb_ids:
> self._backend.networkEventDeregisterAny(eid)
> + for eid in self._storage_pool_cb_ids:
> + self._backend.storagePoolEventDeregisterAny(eid)
> except:
> logging.debug("Failed to deregister events in conn cleanup",
> exc_info=True)
> finally:
> self._domain_cb_ids = []
> self._network_cb_ids = []
> + self._storage_pool_cb_ids = []
>
> self._backend.close()
> self._stats = []
> @@ -1177,6 +1210,8 @@ class vmmConnection(vmmGObject):
> pollvm = False
> if self.using_network_events and not force:
> pollnet = False
> + if self.using_storage_pool_events and not force:
> + pollpool = False
>
> self._hostinfo = self._backend.getInfo()
>
> diff --git a/virtManager/storagelist.py b/virtManager/storagelist.py
> index 8a34e7f..c16cf5c 100644
> --- a/virtManager/storagelist.py
> +++ b/virtManager/storagelist.py
> @@ -654,11 +654,8 @@ class vmmStorageList(vmmGObjectUI):
> if pool is None:
> return
>
> - def cb():
> - pool.refresh()
> -
> logging.debug("Refresh pool '%s'", pool.get_name())
> - vmmAsyncJob.simple_async_noshow(cb, [], self,
> + vmmAsyncJob.simple_async_noshow(pool.refresh, [], self,
> _("Error refreshing pool '%s'") % pool.get_name())
>
> def _pool_apply(self):
This bit is an unrelated cleanup and should be sent as a separate patch.
> diff --git a/virtManager/storagepool.py b/virtManager/storagepool.py
> index b099fbe..ac7af1d 100644
> --- a/virtManager/storagepool.py
> +++ b/virtManager/storagepool.py
> @@ -40,7 +40,7 @@ class vmmStorageVolume(vmmLibvirtObject):
> ##########################
>
> def _conn_tick_poll_param(self):
> - return None
> + return "pollpool"
> def class_name(self):
> return "volume"
>
> @@ -59,6 +59,7 @@ class vmmStorageVolume(vmmLibvirtObject):
> # Deliberately empty
> ignore = stats_update
> def _init_libvirt_state(self):
> + self.tick()
> self.ensure_latest_xml()
>
Hmm, these bits are in the vmmStorageVolume class, which I don't think would
be affected by these changes. Are they required? If so, can you describe what
they fix for you?
Otherwise this patch looks good to me.
Thanks,
Cole
>
> @@ -134,6 +135,8 @@ class vmmStoragePool(vmmLibvirtObject):
> return self._backend.XMLDesc(flags)
> def _define(self, xml):
> return self.conn.define_pool(xml)
> + def _using_events(self):
> + return self.conn.using_storage_pool_events
> def _check_supports_isactive(self):
> return self.conn.check_support(
> self.conn.SUPPORT_POOL_ISACTIVE, self._backend)
>