diff --git a/nova-13.0.0/api-guide/source/conf.py b/nova-13.0.0/api-guide/source/conf.py index 5ced3d2..5d37a75 100644 --- a/nova-13.0.0/api-guide/source/conf.py +++ b/nova-13.0.0/api-guide/source/conf.py @@ -47,8 +47,8 @@ # The encoding of source files. # source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'Compute API Guide' bug_tag = u'api-guide' diff --git a/nova-13.0.0/doc/source/conf.py b/nova-13.0.0/doc/source/conf.py index af23503..2244f1e 100644 --- a/nova-13.0.0/doc/source/conf.py +++ b/nova-13.0.0/doc/source/conf.py @@ -49,8 +49,8 @@ # The encoding of source files. #source_encoding = 'utf-8' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +master_doc = 'index' # General information about the project. project = u'nova' diff --git a/nova-13.0.0/nova/compute/manager.py b/nova-13.0.0/nova/compute/manager.py index 4518353..bc68ec9 100644 --- a/nova-13.0.0/nova/compute/manager.py +++ b/nova-13.0.0/nova/compute/manager.py @@ -798,7 +798,7 @@ def _get_instances_on_driver(self, context, filters=None): return objects.InstanceList() filters['uuid'] = driver_uuids local_instances = objects.InstanceList.get_by_filters( - context, filters, use_slave=True) + context, filters, use_subordinate=True) return local_instances except NotImplementedError: pass @@ -807,7 +807,7 @@ def _get_instances_on_driver(self, context, filters=None): # to brute force. driver_instances = self.driver.list_instances() instances = objects.InstanceList.get_by_filters(context, filters, - use_slave=True) + use_subordinate=True) name_map = {instance.name: instance for instance in instances} local_instances = [] for driver_instance in driver_instances: @@ -1530,7 +1530,7 @@ def _check_instance_build_time(self, context): 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) for instance in building_insts: if timeutils.is_older_than(instance.created_at, timeout): @@ -1809,7 +1809,7 @@ def _sync_scheduler_instance_info(self, context): context = context.elevated() instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) uuids = [instance.uuid for instance in instances] self.scheduler_client.sync_instance_info(context, self.host, uuids) @@ -5739,7 +5739,7 @@ def _heal_instance_info_cache(self, context): # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( - context, self.host, expected_attrs=[], use_slave=True) + context, self.host, expected_attrs=[], use_subordinate=True) for inst in db_instances: # We don't want to refresh the cache for instances # which are building or deleting so don't put them @@ -5770,7 +5770,7 @@ def _heal_instance_info_cache(self, context): context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache', 'flavor'], - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another.
continue @@ -5821,7 +5821,7 @@ def _poll_rebooting_instances(self, context): task_states.REBOOT_PENDING], 'host': self.host} rebooting = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=[], use_slave=True) + context, filters, expected_attrs=[], use_subordinate=True) to_poll = [] for instance in rebooting: @@ -5838,7 +5838,7 @@ def _poll_rescued_instances(self, context): 'host': self.host} rescued_instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=["system_metadata"], - use_slave=True) + use_subordinate=True) to_unrescue = [] for instance in rescued_instances: @@ -5856,7 +5856,7 @@ def _poll_unconfirmed_resizes(self, context): migrations = objects.MigrationList.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, - use_slave=True) + use_subordinate=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) @@ -5885,7 +5885,7 @@ def _set_migration_to_error(migration, reason, **kwargs): try: instance = objects.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) @@ -5946,7 +5946,7 @@ def _poll_shelved_instances(self, context): 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], - use_slave=True) + use_subordinate=True) to_gc = [] for instance in shelved_instances: @@ -5979,7 +5979,7 @@ def _instance_usage_audit(self, context): context, begin, end, host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata', 'flavor'], - use_slave=True) + use_subordinate=True) num_instances = len(instances) errors = 0 successes = 0 @@ -6042,7 +6042,7 @@ def _poll_bandwidth_usage(self, context): instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: @@ -6067,7 +6067,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_out = None usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], - start_period=start_time, use_slave=True) + start_period=start_time, use_subordinate=True) if usage: bw_in = usage.bw_in bw_out = usage.bw_out @@ -6077,7 +6077,7 @@ def _poll_bandwidth_usage(self, context): usage = (objects.BandwidthUsage. 
get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], - start_period=prev_time, use_slave=True)) + start_period=prev_time, use_subordinate=True)) if usage: last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out @@ -6107,14 +6107,14 @@ def _poll_bandwidth_usage(self, context): last_refreshed=refreshed, update_cells=update_cells) - def _get_host_volume_bdms(self, context, use_slave=False): + def _get_host_volume_bdms(self, context, use_subordinate=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=use_slave) + use_subordinate=use_subordinate) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=use_slave) + context, instance.uuid, use_subordinate=use_subordinate) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) @@ -6146,7 +6146,7 @@ def _poll_volume_usage(self, context): return compute_host_bdms = self._get_host_volume_bdms(context, - use_slave=True) + use_subordinate=True) if not compute_host_bdms: return @@ -6172,7 +6172,7 @@ def _sync_power_states(self, context): """ db_instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) num_vm_instances = self.driver.get_num_instances() num_db_instances = len(db_instances) @@ -6231,14 +6231,14 @@ def _query_driver_power_state_and_sync(self, context, db_instance): self._sync_instance_power_state(context, db_instance, vm_power_state, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. pass def _sync_instance_power_state(self, context, db_instance, vm_power_state, - use_slave=False): + use_subordinate=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, @@ -6247,7 +6247,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
- db_instance.refresh(use_slave=use_slave) + db_instance.refresh(use_subordinate=use_subordinate) db_power_state = db_instance.power_state vm_state = db_instance.vm_state @@ -6422,7 +6422,7 @@ def _reclaim_queued_deletes(self, context): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS, - use_slave=True) + use_subordinate=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( @@ -6447,7 +6447,7 @@ def update_available_resource(self, context): new_resource_tracker_dict = {} compute_nodes_in_db = self._get_compute_nodes_in_db(context, - use_slave=True) + use_subordinate=True) nodenames = set(self.driver.get_available_nodes()) for nodename in nodenames: rt = self._get_resource_tracker(nodename) @@ -6480,10 +6480,10 @@ def update_available_resource(self, context): LOG.info(_LI("Deleting orphan compute node %s"), cn.id) cn.destroy() - def _get_compute_nodes_in_db(self, context, use_slave=False): + def _get_compute_nodes_in_db(self, context, use_subordinate=False): try: return objects.ComputeNodeList.get_all_by_host(context, self.host, - use_slave=use_slave) + use_subordinate=use_subordinate) except exception.NotFound: LOG.error(_LE("No compute node record for host %s"), self.host) return [] @@ -6550,7 +6550,7 @@ def _cleanup_running_deleted_instances(self, context): "DELETED but still present on host."), instance.name, instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=True) + context, instance.uuid, use_subordinate=True) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, @@ -6618,11 +6618,11 @@ def _error_out_instance_on_exception(self, context, instance, self._set_instance_obj_error_state(context, instance) @wrap_exception() - def add_aggregate_host(self, context, aggregate, host, slave_info): + def add_aggregate_host(self, context, aggregate, host, subordinate_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') @@ -6634,11 +6634,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info): aggregate, host) @wrap_exception() - def remove_aggregate_host(self, context, host, slave_info, aggregate): + def remove_aggregate_host(self, context, host, subordinate_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') @@ -6733,7 +6733,7 @@ def _run_image_cache_manager_pass(self, context): 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) self.driver.manage_image_cache(context, filtered_instances) @@ -6748,7 +6748,7 @@ def _run_pending_deletes(self, context): attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=attrs, use_slave=True) + context, 
filters, expected_attrs=attrs, use_subordinate=True) LOG.debug('There are %d instances to clean', len(instances)) for instance in instances: @@ -6792,7 +6792,7 @@ def _cleanup_incomplete_migrations(self, context): attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, inst_filters, expected_attrs=attrs, use_slave=True) + context, inst_filters, expected_attrs=attrs, use_subordinate=True) for instance in instances: for migration in migrations: diff --git a/nova-13.0.0/nova/compute/rpcapi.py b/nova-13.0.0/nova/compute/rpcapi.py index 0735a80..65a48f9 100644 --- a/nova-13.0.0/nova/compute/rpcapi.py +++ b/nova-13.0.0/nova/compute/rpcapi.py @@ -162,7 +162,7 @@ class ComputeAPI(object): * 2.0 - Remove 1.x backwards compat * 2.1 - Adds orig_sys_metadata to rebuild_instance() - * 2.2 - Adds slave_info parameter to add_aggregate_host() and + * 2.2 - Adds subordinate_info parameter to add_aggregate_host() and remove_aggregate_host() * 2.3 - Adds volume_id to reserve_block_device_name() * 2.4 - Add bdms to terminate_instance @@ -390,7 +390,7 @@ def get_client(self, target, version_cap, serializer): serializer=serializer) def add_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Add aggregate host. :param ctxt: request context @@ -403,7 +403,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): version = '4.0' @@ -758,7 +758,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files, **extra) def remove_aggregate_host(self, ctxt, aggregate, host_param, host, - slave_info=None): + subordinate_info=None): '''Remove aggregate host. 
:param ctxt: request context @@ -771,7 +771,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host, cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): version = '4.0' diff --git a/nova-13.0.0/nova/conductor/rpcapi.py b/nova-13.0.0/nova/conductor/rpcapi.py index 0443df4..22ebae9 100644 --- a/nova-13.0.0/nova/conductor/rpcapi.py +++ b/nova-13.0.0/nova/conductor/rpcapi.py @@ -126,7 +126,7 @@ class ConductorAPI(object): * 1.62 - Added object_backport() * 1.63 - Changed the format of values['stats'] from a dict to a JSON string in compute_node_update() - * 1.64 - Added use_slave to instance_get_all_filters() + * 1.64 - Added use_subordinate to instance_get_all_filters() - Remove instance_type_get() - Remove aggregate_get() - Remove aggregate_get_by_host() diff --git a/nova-13.0.0/nova/console/xvp.py b/nova-13.0.0/nova/console/xvp.py index 46e9853..790d20a 100644 --- a/nova-13.0.0/nova/console/xvp.py +++ b/nova-13.0.0/nova/console/xvp.py @@ -40,7 +40,7 @@ help='Generated XVP conf file'), cfg.StrOpt('console_xvp_pid', default='/var/run/xvp.pid', - help='XVP master process pid file'), + help='XVP main process pid file'), cfg.StrOpt('console_xvp_log', default='/var/log/xvp.log', help='XVP log file'), diff --git a/nova-13.0.0/nova/db/api.py b/nova-13.0.0/nova/db/api.py index 90c5fe0..354f50d 100644 --- a/nova-13.0.0/nova/db/api.py +++ b/nova-13.0.0/nova/db/api.py @@ -97,8 +97,8 @@ def create_context_manager(connection): def select_db_reader_mode(f): """Decorator to select synchronous or asynchronous reader mode. - The kwarg argument 'use_slave' defines reader mode. Asynchronous reader - will be used if 'use_slave' is True and synchronous reader otherwise. + The kwarg argument 'use_subordinate' defines reader mode. Asynchronous reader - will be used if 'use_subordinate' is True and synchronous reader otherwise. """ return IMPL.select_db_reader_mode(f) diff --git a/nova-13.0.0/nova/db/sqlalchemy/api.py b/nova-13.0.0/nova/db/sqlalchemy/api.py index a739ce6..5f07df5 100644 --- a/nova-13.0.0/nova/db/sqlalchemy/api.py +++ b/nova-13.0.0/nova/db/sqlalchemy/api.py @@ -85,10 +85,10 @@ cfg.BoolOpt('sqlite_synchronous', default=True, help='If True, SQLite uses synchronous mode.'), - cfg.StrOpt('slave_connection', secret=True, help='The SQLAlchemy connection string to use to connect to the' ' slave database.'), + cfg.StrOpt('subordinate_connection', secret=True, help='The SQLAlchemy connection string to use to connect to the' ' subordinate database.'), cfg.StrOpt('mysql_sql_mode', default='TRADITIONAL', help='The SQL mode to be used for MySQL sessions. ' @@ -140,7 +140,7 @@ def _get_db_conf(conf_group, connection=None): kw = dict( connection=connection or conf_group.connection, - slave_connection=conf_group.slave_connection, + slave_connection=conf_group.subordinate_connection, sqlite_fk=False, __autocommit=True, expire_on_commit=False, @@ -188,18 +188,18 @@ def get_context_manager(context): return _context_manager_from_context(context) or main_context_manager -def get_engine(use_slave=False, context=None): +def get_engine(use_subordinate=False, context=None): """Get a database engine object.
- :param use_slave: Whether to use the slave connection + :param use_subordinate: Whether to use the subordinate connection :param context: The request context that can contain a context manager """ ctxt_mgr = _context_manager_from_context(context) or main_context_manager - return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_slave) + return ctxt_mgr.get_legacy_facade().get_engine(use_slave=use_subordinate) -def get_session(use_slave=False,context=None, **kwargs): +def get_session(use_subordinate=False,context=None, **kwargs): ctxt_mgr = _context_manager_from_context(context) or main_context_manager - return ctxt_mgr.get_legacy_facade().get_session(use_slave=use_slave, **kwargs) + return ctxt_mgr.get_legacy_facade().get_session(use_slave=use_subordinate, **kwargs) def get_api_engine(): return api_context_manager.get_legacy_facade().get_engine() @@ -264,9 +264,9 @@ def wrapper(context, aggregate_id, *args, **kwargs): def select_db_reader_mode(f): """Decorator to select synchronous or asynchronous reader mode. - The kwarg argument 'use_slave' defines reader mode. Asynchronous reader - will be used if 'use_slave' is True and synchronous reader otherwise. - If 'use_slave' is not specified default value 'False' will be used. + The kwarg argument 'use_subordinate' defines reader mode. Asynchronous reader + will be used if 'use_subordinate' is True and synchronous reader otherwise. + If 'use_subordinate' is not specified default value 'False' will be used. Wrapped function must have a context in the arguments. """ @@ -277,9 +277,9 @@ def wrapper(*args, **kwargs): keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs) context = keyed_args['context'] - use_slave = keyed_args.get('use_slave', False) + use_subordinate = keyed_args.get('use_subordinate', False) - if use_slave: + if use_subordinate: reader_mode = main_context_manager.async else: reader_mode = main_context_manager.reader @@ -6375,7 +6375,7 @@ def archive_deleted_rows(max_rows=None): """ table_to_rows_archived = {} total_rows_archived = 0 - meta = MetaData(get_engine(use_slave=True)) + meta = MetaData(get_engine(use_subordinate=True)) meta.reflect() # Reverse sort the tables so we get the leaf nodes first for processing.
for table in reversed(meta.sorted_tables): diff --git a/nova-13.0.0/nova/network/ldapdns.py b/nova-13.0.0/nova/network/ldapdns.py index 44f4b6d..b8f2641 100644 --- a/nova-13.0.0/nova/network/ldapdns.py +++ b/nova-13.0.0/nova/network/ldapdns.py @@ -42,9 +42,9 @@ default='password', help='Password for LDAP DNS', secret=True), - cfg.StrOpt('ldap_dns_soa_hostmaster', - default='hostmaster@example.org', - help='Hostmaster for LDAP DNS driver Statement of Authority'), + cfg.StrOpt('ldap_dns_soa_hostmaster', + default='hostmaster@example.org', + help='Hostmaster for LDAP DNS driver Statement of Authority'), cfg.MultiStrOpt('ldap_dns_servers', default=['dns.example.org'], help='DNS Servers for LDAP DNS driver'), @@ -156,7 +156,7 @@ def _soa(cls): date = time.strftime('%Y%m%d%H%M%S') soa = '%s %s %s %s %s %s %s' % ( CONF.ldap_dns_servers[0], - CONF.ldap_dns_soa_hostmaster, + CONF.ldap_dns_soa_hostmaster, date, CONF.ldap_dns_soa_refresh, CONF.ldap_dns_soa_retry, diff --git a/nova-13.0.0/nova/network/linux_net.py b/nova-13.0.0/nova/network/linux_net.py index 40064a0..ce72c26 100644 --- a/nova-13.0.0/nova/network/linux_net.py +++ b/nova-13.0.0/nova/network/linux_net.py @@ -1650,7 +1650,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True, out, err = _execute('brctl', 'addif', bridge, interface, check_exit_code=False, run_as_root=True) if (err and err != "device %s is already a member of a bridge; " - "can't enslave it to bridge %s.\n" % (interface, bridge)): + "can't enslave it to bridge %s.\n" % (interface, bridge)): msg = _('Failed to add interface: %s') % err raise exception.NovaException(msg) diff --git a/nova-13.0.0/nova/network/neutronv2/api.py b/nova-13.0.0/nova/network/neutronv2/api.py index 6c2fbc5..4873c16 100644 --- a/nova-13.0.0/nova/network/neutronv2/api.py +++ b/nova-13.0.0/nova/network/neutronv2/api.py @@ -930,7 +930,7 @@ def _gather_port_ids_and_networks(self, context, instance, networks=None, instance = objects.Instance.get_by_uuid( context, instance['uuid'], expected_attrs=['system_metadata', 'info_cache'], - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: LOG.debug("Instance is deleted during refresh " "info cache", instance=instance) diff --git a/nova-13.0.0/nova/objects/bandwidth_usage.py b/nova-13.0.0/nova/objects/bandwidth_usage.py index fa5119d..cf6a4a6 100644 --- a/nova-13.0.0/nova/objects/bandwidth_usage.py +++ b/nova-13.0.0/nova/objects/bandwidth_usage.py @@ -20,7 +20,7 @@ class BandwidthUsage(base.NovaPersistentObject, base.NovaObject, base.NovaObjectDictCompat): # Version 1.0: Initial version - # Version 1.1: Add use_slave to get_by_instance_uuid_and_mac + # Version 1.1: Add use_subordinate to get_by_instance_uuid_and_mac # Version 1.2: Add update_cells to create VERSION = '1.2' @@ -48,17 +48,17 @@ def _from_db_object(context, bw_usage, db_bw_usage): @staticmethod @db.select_db_reader_mode - def _db_bw_usage_get(context, uuid, start_period, mac, use_slave=False): + def _db_bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): return db.bw_usage_get(context, uuid=uuid, start_period=start_period, mac=mac) @base.serialize_args @base.remotable_classmethod def get_by_instance_uuid_and_mac(cls, context, instance_uuid, mac, - start_period=None, use_slave=False): + start_period=None, use_subordinate=False): db_bw_usage = cls._db_bw_usage_get(context, uuid=instance_uuid, start_period=start_period, mac=mac, - use_slave=use_slave) + use_subordinate=use_subordinate) if db_bw_usage: return cls._from_db_object(context, cls(),
db_bw_usage) @@ -78,7 +78,7 @@ def create(self, uuid, mac, bw_in, bw_out, last_ctr_in, @base.NovaObjectRegistry.register class BandwidthUsageList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version - # Version 1.1: Add use_slave to get_by_uuids + # Version 1.1: Add use_subordinate to get_by_uuids # Version 1.2: BandwidthUsage <= version 1.2 VERSION = '1.2' fields = { @@ -88,14 +88,14 @@ class BandwidthUsageList(base.ObjectListBase, base.NovaObject): @staticmethod @db.select_db_reader_mode def _db_bw_usage_get_by_uuids(context, uuids, start_period, - use_slave=False): + use_subordinate=False): return db.bw_usage_get_by_uuids(context, uuids=uuids, start_period=start_period) @base.serialize_args @base.remotable_classmethod - def get_by_uuids(cls, context, uuids, start_period=None, use_slave=False): + def get_by_uuids(cls, context, uuids, start_period=None, use_subordinate=False): db_bw_usages = cls._db_bw_usage_get_by_uuids(context, uuids=uuids, start_period=start_period, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(), BandwidthUsage, db_bw_usages) diff --git a/nova-13.0.0/nova/objects/block_device.py b/nova-13.0.0/nova/objects/block_device.py index b671975..a616b0d 100644 --- a/nova-13.0.0/nova/objects/block_device.py +++ b/nova-13.0.0/nova/objects/block_device.py @@ -287,7 +287,7 @@ def obj_load_attr(self, attrname): class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: BlockDeviceMapping <= version 1.1 - # Version 1.2: Added use_slave to get_by_instance_uuid + # Version 1.2: Added use_subordinate to get_by_instance_uuid # Version 1.3: BlockDeviceMapping <= version 1.2 # Version 1.4: BlockDeviceMapping <= version 1.3 # Version 1.5: BlockDeviceMapping <= version 1.4 @@ -325,28 +325,28 @@ def bdms_by_instance_uuid(cls, context, instance_uuids): @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance_uuids( - context, instance_uuids, use_slave=False): + context, instance_uuids, use_subordinate=False): return db.block_device_mapping_get_all_by_instance_uuids( context, instance_uuids) @base.remotable_classmethod - def get_by_instance_uuids(cls, context, instance_uuids, use_slave=False): + def get_by_instance_uuids(cls, context, instance_uuids, use_subordinate=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance_uuids( - context, instance_uuids, use_slave=use_slave) + context, instance_uuids, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=False): + context, instance_uuid, use_subordinate=False): return db.block_device_mapping_get_all_by_instance( context, instance_uuid) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) diff --git a/nova-13.0.0/nova/objects/compute_node.py b/nova-13.0.0/nova/objects/compute_node.py index a4f99e1..24de5ac 100644 --- a/nova-13.0.0/nova/objects/compute_node.py +++ b/nova-13.0.0/nova/objects/compute_node.py @@ -272,8 +272,8 
@@ def get_by_host_and_nodename(cls, context, host, nodename): # TODO(pkholkin): Remove this method in the next major version bump @base.remotable_classmethod def get_first_node_by_host_for_old_compat(cls, context, host, - use_slave=False): - computes = ComputeNodeList.get_all_by_host(context, host, use_slave) + use_subordinate=False): + computes = ComputeNodeList.get_all_by_host(context, host, use_subordinate) # FIXME(sbauza): Some hypervisors (VMware, Ironic) can return multiple # nodes per host, we should return all the nodes and modify the callers # instead. @@ -370,7 +370,7 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject): # Version 1.2 Add get_by_service() # Version 1.3 ComputeNode version 1.4 # Version 1.4 ComputeNode version 1.5 - # Version 1.5 Add use_slave to get_by_service + # Version 1.5 Add use_subordinate to get_by_service # Version 1.6 ComputeNode version 1.6 # Version 1.7 ComputeNode version 1.7 # Version 1.8 ComputeNode version 1.8 + add get_all_by_host() @@ -401,7 +401,7 @@ def get_by_hypervisor(cls, context, hypervisor_match): # NOTE(hanlind): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod - def _get_by_service(cls, context, service_id, use_slave=False): + def _get_by_service(cls, context, service_id, use_subordinate=False): try: db_computes = db.compute_nodes_get_by_service_id( context, service_id) @@ -414,12 +414,12 @@ def _get_by_service(cls, context, service_id, use_slave=False): @staticmethod @db.select_db_reader_mode - def _db_compute_node_get_all_by_host(context, host, use_slave=False): + def _db_compute_node_get_all_by_host(context, host, use_subordinate=False): return db.compute_node_get_all_by_host(context, host) @base.remotable_classmethod - def get_all_by_host(cls, context, host, use_slave=False): + def get_all_by_host(cls, context, host, use_subordinate=False): db_computes = cls._db_compute_node_get_all_by_host(context, host, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) diff --git a/nova-13.0.0/nova/objects/instance.py b/nova-13.0.0/nova/objects/instance.py index e759fb5..99e044e 100644 --- a/nova-13.0.0/nova/objects/instance.py +++ b/nova-13.0.0/nova/objects/instance.py @@ -386,17 +386,17 @@ def _from_db_object(context, instance, db_inst, expected_attrs=None): @staticmethod @db.select_db_reader_mode def _db_instance_get_by_uuid(context, uuid, columns_to_join, - use_slave=False): + use_subordinate=False): return db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join) @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): + def get_by_uuid(cls, context, uuid, expected_attrs=None, use_subordinate=False): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @@ -728,12 +728,12 @@ def _notify(): self.obj_reset_changes() @base.remotable - def refresh(self, use_slave=False): + def refresh(self, use_subordinate=False): extra = [field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field)] current = self.__class__.get_by_uuid(self._context, uuid=self.uuid, expected_attrs=extra, - use_slave=use_slave) + use_subordinate=use_subordinate) # NOTE(danms): We orphan 
the instance copy so we do not unexpectedly # trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) @@ -1040,7 +1040,7 @@ class InstanceList(base.ObjectListBase, base.NovaObject): @db.select_db_reader_mode def _get_by_filters_impl(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False, + marker=None, expected_attrs=None, use_subordinate=False, sort_keys=None, sort_dirs=None): if sort_keys or sort_dirs: db_inst_list = db.instance_get_all_by_filters_sort( @@ -1057,25 +1057,25 @@ def _get_by_filters_impl(cls, context, filters, @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False, + marker=None, expected_attrs=None, use_subordinate=False, sort_keys=None, sort_dirs=None): return cls._get_by_filters_impl( context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, expected_attrs=expected_attrs, - use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs) + use_subordinate=use_subordinate, sort_keys=sort_keys, sort_dirs=sort_dirs) @staticmethod @db.select_db_reader_mode def _db_instance_get_all_by_host(context, host, columns_to_join, - use_slave=False): + use_subordinate=False): return db.instance_get_all_by_host(context, host, columns_to_join=columns_to_join) @base.remotable_classmethod - def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): + def get_by_host(cls, context, host, expected_attrs=None, use_subordinate=False): db_inst_list = cls._db_instance_get_all_by_host( context, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @@ -1115,7 +1115,7 @@ def get_hung_in_rebooting(cls, context, reboot_window, @db.select_db_reader_mode def _db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join, - use_slave=False): + use_subordinate=False): return db.instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=columns_to_join) @@ -1124,7 +1124,7 @@ def _db_instance_get_active_by_window_joined( def _get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, - use_slave=False): + use_subordinate=False): # NOTE(mriedem): We need to convert the begin/end timestamp strings # to timezone-aware datetime objects for the DB API call. begin = timeutils.parse_isotime(begin) @@ -1132,7 +1132,7 @@ def _get_active_by_window_joined(cls, context, begin, end=None, db_inst_list = cls._db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @@ -1140,7 +1140,7 @@ def _get_active_by_window_joined(cls, context, begin, end=None, def get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, expected_attrs=None, - use_slave=False): + use_subordinate=False): """Get instances and joins active during a certain time window. 
:param:context: nova request context @@ -1150,7 +1150,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, :param:host: used to filter instances on a given compute host :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances - :param use_slave if True, ship this query off to a DB slave + :param use_subordinate if True, ship this query off to a DB subordinate :returns: InstanceList """ @@ -1161,7 +1161,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, return cls._get_active_by_window_joined(context, begin, end, project_id, host, expected_attrs, - use_slave=use_slave) + use_subordinate=use_subordinate) @base.remotable_classmethod def get_by_security_group_id(cls, context, security_group_id): diff --git a/nova-13.0.0/nova/objects/migration.py b/nova-13.0.0/nova/objects/migration.py index 9baec5e..62e6755 100644 --- a/nova-13.0.0/nova/objects/migration.py +++ b/nova-13.0.0/nova/objects/migration.py @@ -155,7 +155,7 @@ def instance(self, instance): class MigrationList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Migration <= 1.1 - # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute + # Version 1.1: Added use_subordinate to get_unconfirmed_by_dest_compute # Version 1.2: Migration version 1.2 # Version 1.3: Added a new function to get in progress migrations # for an instance. @@ -168,15 +168,15 @@ class MigrationList(base.ObjectListBase, base.NovaObject): @staticmethod @db.select_db_reader_mode def _db_migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=False): + context, confirm_window, dest_compute, use_subordinate=False): return db.migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute) @base.remotable_classmethod def get_unconfirmed_by_dest_compute(cls, context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): db_migrations = cls._db_migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=use_slave) + context, confirm_window, dest_compute, use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) diff --git a/nova-13.0.0/nova/objects/service.py b/nova-13.0.0/nova/objects/service.py index 7104cd8..41b7761 100644 --- a/nova-13.0.0/nova/objects/service.py +++ b/nova-13.0.0/nova/objects/service.py @@ -82,7 +82,7 @@ class Service(base.NovaPersistentObject, base.NovaObject, # Version 1.1: Added compute_node nested object # Version 1.2: String attributes updated to support unicode # Version 1.3: ComputeNode version 1.5 - # Version 1.4: Added use_slave to get_by_compute_host + # Version 1.4: Added use_subordinate to get_by_compute_host # Version 1.5: ComputeNode version 1.6 # Version 1.6: ComputeNode version 1.7 # Version 1.7: ComputeNode version 1.8 @@ -234,13 +234,13 @@ def get_by_host_and_binary(cls, context, host, binary): @staticmethod @db.select_db_reader_mode - def _db_service_get_by_compute_host(context, host, use_slave=False): + def _db_service_get_by_compute_host(context, host, use_subordinate=False): return db.service_get_by_compute_host(context, host) @base.remotable_classmethod - def get_by_compute_host(cls, context, host, use_slave=False): + def get_by_compute_host(cls, context, host, use_subordinate=False): db_service = cls._db_service_get_by_compute_host(context, host, - use_slave=use_slave) + use_subordinate=use_subordinate) return 
cls._from_db_object(context, cls(), db_service) # NOTE(ndipanov): This is deprecated and should be removed on the next @@ -330,11 +330,11 @@ def clear_min_version_cache(cls): @staticmethod @db.select_db_reader_mode - def _db_service_get_minimum_version(context, binary, use_slave=False): + def _db_service_get_minimum_version(context, binary, use_subordinate=False): return db.service_get_minimum_version(context, binary) @base.remotable_classmethod - def get_minimum_version(cls, context, binary, use_slave=False): + def get_minimum_version(cls, context, binary, use_subordinate=False): if not binary.startswith('nova-'): LOG.warning(_LW('get_minimum_version called with likely-incorrect ' 'binary `%s\''), binary) @@ -346,7 +346,7 @@ def get_minimum_version(cls, context, binary, use_slave=False): if cached_version: return cached_version version = cls._db_service_get_minimum_version(context, binary, - use_slave=use_slave) + use_subordinate=use_subordinate) if version is None: return 0 # NOTE(danms): Since our return value is not controlled by object diff --git a/nova-13.0.0/nova/objects/virtual_interface.py b/nova-13.0.0/nova/objects/virtual_interface.py index a3fc936..22b1115 100644 --- a/nova-13.0.0/nova/objects/virtual_interface.py +++ b/nova-13.0.0/nova/objects/virtual_interface.py @@ -96,12 +96,12 @@ def get_all(cls, context): @staticmethod @db.select_db_reader_mode def _db_virtual_interface_get_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): return db.virtual_interface_get_by_instance(context, instance_uuid) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_vifs = cls._db_virtual_interface_get_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) diff --git a/nova-13.0.0/nova/tests/functional/api_sample_tests/test_volumes.py b/nova-13.0.0/nova/tests/functional/api_sample_tests/test_volumes.py index 8eba7b6..1f465c3 100644 --- a/nova-13.0.0/nova/tests/functional/api_sample_tests/test_volumes.py +++ b/nova-13.0.0/nova/tests/functional/api_sample_tests/test_volumes.py @@ -213,7 +213,7 @@ class VolumeAttachmentsSample(test_servers.ServersSampleBase): def _stub_db_bdms_get_all_by_instance(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': 'a26887c6-c47b-4654-abb5-dfadf7d3f803', diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_disk_config.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_disk_config.py index 8b49f26..5b78205 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_disk_config.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_disk_config.py @@ -65,7 +65,7 @@ def fake_instance_get(context, id_): self.stub_out('nova.db.instance_get', fake_instance_get) def fake_instance_get_by_uuid(context, uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): for instance in FAKE_INSTANCES: if uuid == instance['uuid']: return instance diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_instance_actions.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_instance_actions.py index 220de4c..681f9a5 100644 --- 
a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_instance_actions.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_instance_actions.py @@ -91,7 +91,7 @@ def test_list_actions_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -107,7 +107,7 @@ def test_get_action_restricted_by_project(self): def fake_instance_get_by_uuid(context, instance_id, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) @@ -143,7 +143,7 @@ def setUp(self): self.fake_actions = copy.deepcopy(fake_server_actions.FAKE_ACTIONS) self.fake_events = copy.deepcopy(fake_server_actions.FAKE_EVENTS) - def fake_instance_get_by_uuid(context, instance_id, use_slave=False): + def fake_instance_get_by_uuid(context, instance_id, use_subordinate=False): return fake_instance.fake_instance_obj(None, **{'name': 'fake', 'project_id': context.project_id}) diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_security_groups.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_security_groups.py index 215a93b..555abcf 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_security_groups.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_security_groups.py @@ -91,7 +91,7 @@ def security_group_rule_db(rule, id=None): def return_server(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'power_state': 0x01, @@ -102,7 +102,7 @@ def return_server(context, server_id, def return_server_by_uuid(context, server_uuid, columns_to_join=None, - use_slave=False): + use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'power_state': 0x01, @@ -428,7 +428,7 @@ def test_get_security_group_by_instance(self): expected = {'security_groups': groups} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) @@ -453,7 +453,7 @@ def test_get_security_group_empty_for_instance(self, mock_sec_group, expected = {'security_groups': []} def return_instance(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertEqual(server_id, FAKE_UUID1) return return_server_by_uuid(context, server_id) mock_db_get_ins.side_effect = return_instance diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py index d6a7817..9e00679 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_actions.py @@ -507,7 +507,7 @@ def test_rebuild_admin_pass_pass_disabled(self): def test_rebuild_server_not_found(self): def server_not_found(self, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=instance_id) self.stub_out('nova.db.instance_get_by_uuid', server_not_found) @@ -951,7 +951,7 @@ def _fake_id(x): delete_on_termination=False)] def 
fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -1043,7 +1043,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_metadata.py index 114f659..d60c73e 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_metadata.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_metadata.py @@ -94,7 +94,7 @@ def return_server(context, server_id, columns_to_join=None): def return_server_by_uuid(context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', @@ -106,7 +106,7 @@ def return_server_by_uuid(context, server_uuid, def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) @@ -738,7 +738,7 @@ def _return_server_in_build(self, context, server_id, 'vm_state': vm_states.BUILDING}) def _return_server_in_build_by_uuid(self, context, server_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( **{'id': 1, 'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64', diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_start_stop.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_start_stop.py index 85f5608..79e8a45 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_start_stop.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_server_start_stop.py @@ -31,7 +31,7 @@ def fake_instance_get(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): result = fakes.stub_instance(id=1, uuid=instance_id) result['created_at'] = None result['deleted_at'] = None diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_serversV21.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_serversV21.py index 362f30c..62be620 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_serversV21.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_serversV21.py @@ -115,7 +115,7 @@ def fake_start_stop_invalid_state(self, context, instance): def fake_instance_get_by_uuid_not_found(context, uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): raise exception.InstanceNotFound(instance_id=uuid) diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_shelve.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_shelve.py index c25c64f..10d288e 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_shelve.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_shelve.py @@ -28,7 +28,7 @@ def fake_instance_get_by_uuid(context, instance_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): return fake_instance.fake_db_instance( 
**{'name': 'fake', 'project_id': '%s_unequal' % context.project_id}) diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py index e328b4e..8473f02 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py @@ -157,11 +157,11 @@ def _get_tenant_usages(self, detailed=''): def fake_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, expected_attrs=None, - use_slave=False): + use_subordinate=False): self.assertEqual(['flavor'], expected_attrs) return orig_get_active_by_window_joined(context, begin, end, project_id, host, - expected_attrs, use_slave) + expected_attrs, use_subordinate) with mock.patch.object(objects.InstanceList, 'get_active_by_window_joined', diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_volumes.py b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_volumes.py index 42745aa..b994e3d 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_volumes.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/compute/test_volumes.py @@ -104,7 +104,7 @@ def fake_compute_volume_snapshot_create(self, context, volume_id, pass -def fake_bdms_get_all_by_instance(context, instance_uuid, use_slave=False): +def fake_bdms_get_all_by_instance(context, instance_uuid, use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'instance_uuid': instance_uuid, diff --git a/nova-13.0.0/nova/tests/unit/api/openstack/fakes.py b/nova-13.0.0/nova/tests/unit/api/openstack/fakes.py index 3b4e339..edd5d63 100644 --- a/nova-13.0.0/nova/tests/unit/api/openstack/fakes.py +++ b/nova-13.0.0/nova/tests/unit/api/openstack/fakes.py @@ -354,7 +354,7 @@ def get_fake_uuid(token=0): def fake_instance_get(**kwargs): - def _return_server(context, uuid, columns_to_join=None, use_slave=False): + def _return_server(context, uuid, columns_to_join=None, use_subordinate=False): return stub_instance(1, **kwargs) return _return_server @@ -384,8 +384,8 @@ def _return_servers(context, *args, **kwargs): if 'columns_to_join' in kwargs: kwargs.pop('columns_to_join') - if 'use_slave' in kwargs: - kwargs.pop('use_slave') + if 'use_subordinate' in kwargs: + kwargs.pop('use_subordinate') if 'sort_keys' in kwargs: kwargs.pop('sort_keys') @@ -690,7 +690,7 @@ def stub_snapshot_get_all(self, context): def stub_bdm_get_all_by_instance_uuids(context, instance_uuids, - use_slave=False): + use_subordinate=False): i = 1 result = [] for instance_uuid in instance_uuids: diff --git a/nova-13.0.0/nova/tests/unit/compute/test_compute.py b/nova-13.0.0/nova/tests/unit/compute/test_compute.py index 7d6d35d..b00db80 100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_compute.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_compute.py @@ -169,7 +169,7 @@ def setUp(self): self.compute.driver, NODENAME) self.compute._resource_tracker_dict[NODENAME] = fake_rt - def fake_get_compute_nodes_in_db(context, use_slave=False): + def fake_get_compute_nodes_in_db(context, use_subordinate=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuids.fake_compute_node, 'vcpus_used': 0, @@ -653,7 +653,7 @@ def test_poll_bandwidth_usage_not_implemented(self): time.time().AndReturn(20) time.time().AndReturn(21) objects.InstanceList.get_by_host(ctxt, 'fake-mini', - use_slave=True).AndReturn([]) + use_subordinate=True).AndReturn([]) 
self.compute.driver.get_all_bw_counters([]).AndRaise( NotImplementedError) self.mox.ReplayAll() @@ -682,10 +682,10 @@ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host): got_host_bdms = self.compute._get_host_volume_bdms('fake-context') mock_get_by_host.assert_called_once_with('fake-context', self.compute.host, - use_slave=False) + use_subordinate=False) mock_get_by_inst.assert_called_once_with('fake-context', uuids.volume_instance, - use_slave=False) + use_subordinate=False) self.assertEqual(expected_host_bdms, got_host_bdms) def test_poll_volume_usage_disabled(self): @@ -704,7 +704,7 @@ def test_poll_volume_usage_returns_no_vols(self): self.mox.StubOutWithMock(self.compute, '_get_host_volume_bdms') self.mox.StubOutWithMock(self.compute.driver, 'get_all_volume_usage') # Following methods are called. - self.compute._get_host_volume_bdms(ctxt, use_slave=True).AndReturn([]) + self.compute._get_host_volume_bdms(ctxt, use_subordinate=True).AndReturn([]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -719,7 +719,7 @@ def test_poll_volume_usage_with_data(self): lambda x, y: [3, 4]) # All the mocks are called self.compute._get_host_volume_bdms(ctxt, - use_slave=True).AndReturn([1, 2]) + use_subordinate=True).AndReturn([1, 2]) self.compute._update_volume_usage_cache(ctxt, [3, 4]) self.mox.ReplayAll() self.flags(volume_usage_poll_interval=10) @@ -758,7 +758,7 @@ def test_detach_volume_usage(self): self.compute.driver.block_stats(instance, 'vdb').\ AndReturn([1, 30, 1, 20, None]) self.compute._get_host_volume_bdms(self.context, - use_slave=True).AndReturn( + use_subordinate=True).AndReturn( host_volume_bdms) self.compute.driver.get_all_volume_usage( self.context, host_volume_bdms).AndReturn( @@ -6158,9 +6158,9 @@ def test_cleanup_running_deleted_instances_reap(self): self.compute._shutdown_instance(ctxt, inst1, bdms, notify=False).\ AndRaise(test.TestingException) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst1.uuid, use_slave=True).AndReturn(bdms) + inst1.uuid, use_subordinate=True).AndReturn(bdms) objects.BlockDeviceMappingList.get_by_instance_uuid(ctxt, - inst2.uuid, use_slave=True).AndReturn(bdms) + inst2.uuid, use_subordinate=True).AndReturn(bdms) self.compute._shutdown_instance(ctxt, inst2, bdms, notify=False).\ AndReturn(None) @@ -6267,13 +6267,13 @@ def _heal_instance_info_cache(self, 'get_nw_info': 0, 'expected_instance': None} def fake_instance_get_all_by_host(context, host, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): call_info['get_all_by_host'] += 1 self.assertEqual([], columns_to_join) return instances[:] def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): if instance_uuid not in instance_map: raise exception.InstanceNotFound(instance_id=instance_uuid) call_info['get_by_uuid'] += 1 @@ -6283,7 +6283,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance_map[instance_uuid] # NOTE(comstud): Override the stub in setUp() - def fake_get_instance_nw_info(context, instance, use_slave=False): + def fake_get_instance_nw_info(context, instance, use_subordinate=False): # Note that this exception gets caught in compute/manager # and is ignored. 
However, the below increment of # 'get_nw_info' won't happen, and you'll get an assert @@ -6381,7 +6381,7 @@ def test_poll_rescued_instances(self, unrescue, get): def fake_instance_get_all_by_filters(context, filters, expected_attrs=None, - use_slave=False): + use_subordinate=False): self.assertEqual(["system_metadata"], expected_attrs) return instances @@ -6431,7 +6431,7 @@ def test_poll_rebooting_instances(self, get): task_states.REBOOTING, task_states.REBOOT_STARTED, task_states.REBOOT_PENDING]} get.assert_called_once_with(ctxt, filters, - expected_attrs=[], use_slave=True) + expected_attrs=[], use_subordinate=True) def test_poll_unconfirmed_resizes(self): instances = [ @@ -6483,7 +6483,7 @@ def test_poll_unconfirmed_resizes(self): migrations.append(fake_mig) def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIn('metadata', columns_to_join) self.assertIn('system_metadata', columns_to_join) # raise InstanceNotFound exception for non-existing instance @@ -6495,7 +6495,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance def fake_migration_get_unconfirmed_by_dest_compute(context, - resize_confirm_window, dest_compute, use_slave=False): + resize_confirm_window, dest_compute, use_subordinate=False): self.assertEqual(dest_compute, CONF.host) return migrations @@ -6996,7 +6996,7 @@ def test_reclaim_queued_deletes_continue_on_error(self): objects.InstanceList.get_by_filters( ctxt, mox.IgnoreArg(), expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True + use_subordinate=True ).AndReturn(instances) # The first instance delete fails. @@ -7037,12 +7037,12 @@ def test_sync_power_states(self): hardware.InstanceInfo(state=power_state.RUNNING)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.RUNNING, - use_slave=True) + use_subordinate=True) self.compute.driver.get_info(mox.IgnoreArg()).AndReturn( hardware.InstanceInfo(state=power_state.SHUTDOWN)) self.compute._sync_instance_power_state(ctxt, mox.IgnoreArg(), power_state.SHUTDOWN, - use_slave=True) + use_subordinate=True) self.mox.ReplayAll() self.compute._sync_power_states(ctxt) @@ -10998,7 +10998,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", - aggregate=jsonutils.to_primitive(self.aggr), slave_info=None) + aggregate=jsonutils.to_primitive(self.aggr), subordinate_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): @@ -11012,36 +11012,36 @@ def fake_driver_remove_from_aggregate(context, aggregate, host, self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="host", - slave_info=None) + subordinate_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) - def test_add_aggregate_host_passes_slave_info_to_driver(self): + def test_add_aggregate_host_passes_subordinate_info_to_driver(self): def driver_add_to_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", - slave_info="SLAVE_INFO", + 
subordinate_info="SLAVE_INFO", aggregate=jsonutils.to_primitive(self.aggr)) - def test_remove_from_aggregate_passes_slave_info_to_driver(self): + def test_remove_from_aggregate_passes_subordinate_info_to_driver(self): def driver_remove_from_aggregate(context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate['id'], self.aggr['id']) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stubs.Set(self.compute.driver, "remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, aggregate=jsonutils.to_primitive(self.aggr), host="the_host", - slave_info="SLAVE_INFO") + subordinate_info="SLAVE_INFO") class ComputePolicyTestCase(BaseTestCase): diff --git a/nova-13.0.0/nova/tests/unit/compute/test_compute_api.py b/nova-13.0.0/nova/tests/unit/compute/test_compute_api.py index 328ea0b..b05725e 100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_compute_api.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_compute_api.py @@ -2247,7 +2247,7 @@ def _test_snapshot_volume_backed(self, quiesce_required, quiesce_fails, quiesced = [False, False] quiesce_expected = not quiesce_fails and vm_state == vm_states.ACTIVE - def fake_get_all_by_instance(context, instance, use_slave=False): + def fake_get_all_by_instance(context, instance, use_subordinate=False): return copy.deepcopy(instance_bdms) def fake_image_create(context, image_meta, data=None): diff --git a/nova-13.0.0/nova/tests/unit/compute/test_compute_mgr.py b/nova-13.0.0/nova/tests/unit/compute/test_compute_mgr.py index eac8d8b..2aa7c90 100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_compute_mgr.py @@ -204,7 +204,7 @@ def _get_rt_side_effect(*args, **kwargs): get_avail_nodes.return_value = avail_nodes get_rt.side_effect = _get_rt_side_effect self.compute.update_available_resource(ctxt) - get_db_nodes.assert_called_once_with(ctxt, use_slave=True) + get_db_nodes.assert_called_once_with(ctxt, use_subordinate=True) self.assertEqual(sorted([mock.call(node) for node in avail_nodes]), sorted(get_rt.call_args_list)) for rt in rts: @@ -1375,7 +1375,7 @@ def test_sync_power_states(self, mock_get): self.compute._sync_power_states(mock.sentinel.context) mock_get.assert_called_with(mock.sentinel.context, self.compute.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) mock_spawn.assert_called_once_with(mock.ANY, instance) def _get_sync_instance(self, power_state, vm_state, task_state=None, @@ -1394,7 +1394,7 @@ def _get_sync_instance(self, power_state, vm_state, task_state=None, def test_sync_instance_power_state_match(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) @@ -1402,7 +1402,7 @@ def test_sync_instance_power_state_match(self): def test_sync_instance_power_state_running_stopped(self): instance = self._get_sync_instance(power_state.RUNNING, vm_states.ACTIVE) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.ReplayAll() self.compute._sync_instance_power_state(self.context, instance, @@ -1413,7 +1413,7 @@ def _test_sync_to_stop(self, power_state, vm_state, driver_power_state, stop=True, force=False, 
shutdown_terminate=False): instance = self._get_sync_instance( power_state, vm_state, shutdown_terminate=shutdown_terminate) - instance.refresh(use_slave=False) + instance.refresh(use_subordinate=False) instance.save() self.mox.StubOutWithMock(self.compute.compute_api, 'stop') self.mox.StubOutWithMock(self.compute.compute_api, 'delete') @@ -1485,7 +1485,7 @@ def test_query_driver_power_state_and_sync_not_found_driver( mock_sync_power_state.assert_called_once_with(self.context, db_instance, power_state.NOSTATE, - use_slave=True) + use_subordinate=True) def test_run_pending_deletes(self): self.flags(instance_delete_interval=10) @@ -1515,7 +1515,7 @@ def save(self): 'cleaned': False}, expected_attrs=['info_cache', 'security_groups', 'system_metadata'], - use_slave=True).AndReturn([a, b, c]) + use_subordinate=True).AndReturn([a, b, c]) self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files') self.compute.driver.delete_instance_files( @@ -2866,7 +2866,7 @@ def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac, self.compute._poll_bandwidth_usage(self.context) get_by_uuid_mac.assert_called_once_with(self.context, uuids.instance, 'fake-mac', - start_period=0, use_slave=True) + start_period=0, use_subordinate=True) # NOTE(sdague): bw_usage_update happens at some time in # the future, so what last_refreshed is irrelevant. bw_usage_update.assert_called_once_with(self.context, @@ -2939,7 +2939,7 @@ def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host, self.compute._sync_scheduler_instance_info(self.context) mock_get_by_host.assert_called_once_with( fake_elevated, self.compute.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) mock_sync.assert_called_once_with(fake_elevated, self.compute.host, exp_uuids) diff --git a/nova-13.0.0/nova/tests/unit/compute/test_compute_xen.py b/nova-13.0.0/nova/tests/unit/compute/test_compute_xen.py index 36d57bb..fed2134 100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_compute_xen.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_compute_xen.py @@ -55,7 +55,7 @@ def test_sync_power_states_instance_not_found(self): objects.InstanceList.get_by_host(ctxt, self.compute.host, expected_attrs=[], - use_slave=True).AndReturn(instance_list) + use_subordinate=True).AndReturn(instance_list) self.compute.driver.get_num_instances().AndReturn(1) vm_utils.lookup(self.compute.driver._session, instance['name'], False).AndReturn(None) diff --git a/nova-13.0.0/nova/tests/unit/compute/test_multiple_nodes.py b/nova-13.0.0/nova/tests/unit/compute/test_multiple_nodes.py index 4e9aa1a..c5cc5f4 100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_multiple_nodes.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_multiple_nodes.py @@ -71,7 +71,7 @@ def setUp(self): self.conductor = self.start_service('conductor', manager=CONF.conductor.manager) - def fake_get_compute_nodes_in_db(context, use_slave=False): + def fake_get_compute_nodes_in_db(context, use_subordinate=False): fake_compute_nodes = [{'local_gb': 259, 'uuid': uuidsentinel.fake_compute, 'vcpus_used': 0, @@ -142,7 +142,7 @@ def test_compute_manager_removes_deleted_node(self): context=ctx, hypervisor_hostname='B', id=3), ] - def fake_get_compute_nodes_in_db(context, use_slave=False): + def fake_get_compute_nodes_in_db(context, use_subordinate=False): return fake_compute_nodes def fake_compute_node_delete(context, compute_node_id): diff --git a/nova-13.0.0/nova/tests/unit/compute/test_rpcapi.py b/nova-13.0.0/nova/tests/unit/compute/test_rpcapi.py index 58ec05c..2e1de84 
100644 --- a/nova-13.0.0/nova/tests/unit/compute/test_rpcapi.py +++ b/nova-13.0.0/nova/tests/unit/compute/test_rpcapi.py @@ -152,7 +152,7 @@ def _test_compute_api(self, method, rpc_method, def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', @@ -402,7 +402,7 @@ def test_refresh_instance_security_rules(self): def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', diff --git a/nova-13.0.0/nova/tests/unit/db/test_db_api.py b/nova-13.0.0/nova/tests/unit/db/test_db_api.py index 933905d..2e2ccb0 100644 --- a/nova-13.0.0/nova/tests/unit/db/test_db_api.py +++ b/nova-13.0.0/nova/tests/unit/db/test_db_api.py @@ -189,7 +189,7 @@ def test_require_deadlock_retry_wraps_functions_properly(self): def test_select_db_reader_mode_select_sync(self, mock_clone, mock_using): @db.select_db_reader_mode - def func(self, context, value, use_slave=False): + def func(self, context, value, use_subordinate=False): pass mock_clone.return_value = enginefacade._TransactionContextManager( @@ -206,21 +206,21 @@ def func(self, context, value, use_slave=False): def test_select_db_reader_mode_select_async(self, mock_clone, mock_using): @db.select_db_reader_mode - def func(self, context, value, use_slave=False): + def func(self, context, value, use_subordinate=False): pass mock_clone.return_value = enginefacade._TransactionContextManager( mode=enginefacade._ASYNC_READER) ctxt = context.get_admin_context() value = 'some_value' - func(self, ctxt, value, use_slave=True) + func(self, ctxt, value, use_subordinate=True) mock_clone.assert_called_once_with(mode=enginefacade._ASYNC_READER) mock_using.assert_called_once_with(ctxt) @mock.patch.object(enginefacade._TransactionContextManager, 'using') @mock.patch.object(enginefacade._TransactionContextManager, '_clone') - def test_select_db_reader_mode_no_use_slave_select_sync(self, mock_clone, + def test_select_db_reader_mode_no_use_subordinate_select_sync(self, mock_clone, mock_using): @db.select_db_reader_mode @@ -1069,7 +1069,7 @@ def test_get_engine(self, mock_create_facade): sqlalchemy_api.get_engine() mock_create_facade.assert_called_once_with() - mock_facade.get_engine.assert_called_once_with(use_slave=False) + mock_facade.get_engine.assert_called_once_with(use_subordinate=False) def test_get_db_conf_with_connection(self): mock_conf_group = mock.MagicMock() diff --git a/nova-13.0.0/nova/tests/unit/network/test_api.py b/nova-13.0.0/nova/tests/unit/network/test_api.py index c6a30be..fad8018 100644 --- a/nova-13.0.0/nova/tests/unit/network/test_api.py +++ b/nova-13.0.0/nova/tests/unit/network/test_api.py @@ -195,7 +195,7 @@ def fake_associate(*args, **kwargs): def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, - use_slave=None): + use_subordinate=None): if instance_uuid == orig_instance_uuid: self.assertIn('extra.flavor', columns_to_join) return fake_instance.fake_db_instance(uuid=instance_uuid) diff --git a/nova-13.0.0/nova/tests/unit/network/test_linux_net.py b/nova-13.0.0/nova/tests/unit/network/test_linux_net.py index 8b35656..dbda3a0 100644 --- 
a/nova-13.0.0/nova/tests/unit/network/test_linux_net.py +++ b/nova-13.0.0/nova/tests/unit/network/test_linux_net.py @@ -389,7 +389,7 @@ def setUp(self): self.context = context.RequestContext('testuser', 'testproject', is_admin=True) - def get_vifs(_context, instance_uuid, use_slave): + def get_vifs(_context, instance_uuid, use_subordinate): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] diff --git a/nova-13.0.0/nova/tests/unit/objects/test_instance.py b/nova-13.0.0/nova/tests/unit/objects/test_instance.py index a0ffd9d..1b8e3a9 100644 --- a/nova-13.0.0/nova/tests/unit/objects/test_instance.py +++ b/nova-13.0.0/nova/tests/unit/objects/test_instance.py @@ -284,7 +284,7 @@ def test_refresh_does_not_recurse(self): self.mox.StubOutWithMock(objects.Instance, 'get_by_uuid') objects.Instance.get_by_uuid(self.context, uuid=inst.uuid, expected_attrs=['metadata'], - use_slave=False + use_subordinate=False ).AndReturn(inst_copy) self.mox.ReplayAll() self.assertRaises(exception.OrphanedObjectError, inst.refresh) @@ -419,7 +419,7 @@ def test_save_rename_sends_notification(self): self.mox.ReplayAll() inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'], - use_slave=False) + use_subordinate=False) self.assertEqual('hello', inst.display_name) inst.display_name = 'goodbye' inst.save() @@ -1390,7 +1390,7 @@ def test_get_all_by_filters(self): self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -1407,7 +1407,7 @@ def test_get_all_by_filters_sorted(self): self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, expected_attrs=['metadata'], - use_slave=False, sort_keys=['uuid'], sort_dirs=['asc']) + use_subordinate=False, sort_keys=['uuid'], sort_dirs=['asc']) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -1422,7 +1422,7 @@ def test_get_all_by_filters_calls_non_sort(self, # Single sort key/direction is set, call non-sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir', - limit=100, marker='uuid', use_slave=True) + limit=100, marker='uuid', use_subordinate=True) mock_get_by_filters.assert_called_once_with( self.context, {'foo': 'bar'}, 'key', 'dir', limit=100, marker='uuid', columns_to_join=None) @@ -1437,7 +1437,7 @@ def test_get_all_by_filters_calls_sort(self, # Multiple sort keys/directions are set, call sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, limit=100, marker='uuid', - use_slave=True, sort_keys=['key1', 'key2'], + use_subordinate=True, sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2']) mock_get_by_filters_sort.assert_called_once_with( self.context, {'foo': 'bar'}, limit=100, @@ -1459,7 +1459,7 @@ def test_get_all_by_filters_works_for_cleaned(self): self.mox.ReplayAll() inst_list = objects.InstanceList.get_by_filters( self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) self.assertEqual(1, len(inst_list)) self.assertIsInstance(inst_list.objects[0], instance.Instance) @@ -1578,7 +1578,7 @@ def test_with_fault(self): self.mox.ReplayAll() instances = objects.InstanceList.get_by_host(self.context, 
'host', expected_attrs=['fault'], - use_slave=False) + use_subordinate=False) self.assertEqual(2, len(instances)) self.assertEqual(fake_faults['fake-uuid'][0], dict(instances[0].fault)) diff --git a/nova-13.0.0/nova/tests/unit/objects/test_migration.py b/nova-13.0.0/nova/tests/unit/objects/test_migration.py index 74fa70e..014ddc2 100644 --- a/nova-13.0.0/nova/tests/unit/objects/test_migration.py +++ b/nova-13.0.0/nova/tests/unit/objects/test_migration.py @@ -184,7 +184,7 @@ def test_get_unconfirmed_by_dest_compute(self): self.mox.ReplayAll() migrations = ( migration.MigrationList.get_unconfirmed_by_dest_compute( - ctxt, 'window', 'foo', use_slave=False)) + ctxt, 'window', 'foo', use_subordinate=False)) self.assertEqual(2, len(migrations)) for index, db_migration in enumerate(db_migrations): self.compare_obj(migrations[index], db_migration) diff --git a/nova-13.0.0/nova/tests/unit/virt/libvirt/test_driver.py b/nova-13.0.0/nova/tests/unit/virt/libvirt/test_driver.py index 0dc4591..db85605 100644 --- a/nova-13.0.0/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova-13.0.0/nova/tests/unit/virt/libvirt/test_driver.py @@ -10831,7 +10831,7 @@ def get_info(instance_name, xml, **kwargs): self.assertEqual(2, mock_info.call_count) filters = {'uuid': instance_uuids} - mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) + mock_get.assert_called_once_with(mock.ANY, filters, use_subordinate=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains") @@ -10905,7 +10905,7 @@ def side_effect(name, dom, block_device_info): mock_list.assert_called_once_with() self.assertEqual(2, get_disk_info.call_count) filters = {'uuid': instance_uuids} - mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) + mock_get.assert_called_once_with(mock.ANY, filters, use_subordinate=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains", diff --git a/nova-13.0.0/nova/tests/unit/virt/xenapi/client/test_session.py b/nova-13.0.0/nova/tests/unit/virt/xenapi/client/test_session.py index 16c352f..ac21cc5 100644 --- a/nova-13.0.0/nova/tests/unit/virt/xenapi/client/test_session.py +++ b/nova-13.0.0/nova/tests/unit/virt/xenapi/client/test_session.py @@ -74,10 +74,10 @@ def test_session_raises_exception(self, mock_ref, mock_uuid, # First login fails, second login in except block succeeds, # third login for the pool succeeds sess.login_with_password.side_effect = [ - XenAPI.Failure(['HOST_IS_SLAVE', 'master']), None, None] + XenAPI.Failure(['HOST_IS_SLAVE', 'main']), None, None] mock_version.return_value = ('version', 'brand') - session.XenAPISession('http://slave', 'username', 'password') + session.XenAPISession('http://subordinate', 'username', 'password') self.assertEqual(3, sess.login_with_password.call_count) self.assertEqual(3, mock_timeout.call_count) diff --git a/nova-13.0.0/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova-13.0.0/nova/tests/unit/virt/xenapi/test_xenapi.py index 6e578e1..ac1b670 100644 --- a/nova-13.0.0/nova/tests/unit/virt/xenapi/test_xenapi.py +++ b/nova-13.0.0/nova/tests/unit/virt/xenapi/test_xenapi.py @@ -1500,7 +1500,7 @@ def fake_aggregate_get_by_host(self, *args, **kwargs): self.stub_out("nova.db.aggregate_get_by_host", fake_aggregate_get_by_host) - self.stubs.Set(self.conn._session, "is_slave", True) + self.stubs.Set(self.conn._session, "is_subordinate", True) self.assertRaises(test.TestingException, self.conn._session._get_host_uuid) @@ -1514,7 +1514,7 @@ def 
fake_aggregate_get(context, host, key): self.stub_out('nova.db.aggregate_get_by_host', fake_aggregate_get) - self.stubs.Set(self.conn._session, "is_slave", True) + self.stubs.Set(self.conn._session, "is_subordinate", True) self.assertEqual('this_should_be_metadata', self.conn._session._get_host_uuid()) @@ -2970,7 +2970,7 @@ def setUp(self): self.aggr = objects.Aggregate(context=self.context, id=1, **values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', + 'main_compute': 'host', 'availability_zone': 'fake_zone', pool_states.KEY: pool_states.ACTIVE, 'host': xenapi_fake.get_record('host', @@ -2980,18 +2980,18 @@ def test_pool_add_to_aggregate_called_by_driver(self): calls = [] - def pool_add_to_aggregate(context, aggregate, host, slave_info=None): + def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_add_to_aggregate) self.stubs.Set(self.conn._pool, "add_to_aggregate", pool_add_to_aggregate) self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_add_to_aggregate, calls) @@ -3000,18 +3000,18 @@ def test_pool_remove_from_aggregate_called_by_driver(self): calls = [] def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): + subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) calls.append(pool_remove_from_aggregate) self.stubs.Set(self.conn._pool, "remove_from_aggregate", pool_remove_from_aggregate) self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertIn(pool_remove_from_aggregate, calls) @@ -3027,11 +3027,11 @@ def fake_init_pool(id, name): self.assertThat(self.fake_metadata, matchers.DictMatches(result['metadetails'])) - def test_join_slave(self): - # Ensure join_slave gets called when the request gets to master. - def fake_join_slave(id, compute_uuid, host, url, user, password): - fake_join_slave.called = True - self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave) + def test_join_subordinate(self): + # Ensure join_subordinate gets called when the request gets to main. + def fake_join_subordinate(id, compute_uuid, host, url, user, password): + fake_join_subordinate.called = True + self.stubs.Set(self.conn._pool, "_join_subordinate", fake_join_subordinate) aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -3041,7 +3041,7 @@ def fake_join_slave(id, compute_uuid, host, url, user, password): user='fake_user', passwd='fake_pass', xenhost_uuid='fake_uuid')) - self.assertTrue(fake_join_slave.called) + self.assertTrue(fake_join_subordinate.called) def test_add_to_aggregate_first_host(self): def fake_pool_set_name_label(self, session, pool_ref, name): @@ -3081,19 +3081,19 @@ def test_remove_from_empty_aggregate(self): self.conn._pool.remove_from_aggregate, self.context, result, "test_host") - def test_remove_slave(self): - # Ensure eject slave gets called. 
- def fake_eject_slave(id, compute_uuid, host_uuid): - fake_eject_slave.called = True - self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave) + def test_remove_subordinate(self): + # Ensure eject subordinate gets called. + def fake_eject_subordinate(id, compute_uuid, host_uuid): + fake_eject_subordinate.called = True + self.stubs.Set(self.conn._pool, "_eject_subordinate", fake_eject_subordinate) self.fake_metadata['host2'] = 'fake_host2_uuid' aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(fake_eject_slave.called) + self.assertTrue(fake_eject_subordinate.called) - def test_remove_master_solo(self): + def test_remove_main_solo(self): # Ensure metadata are cleared after removal. def fake_clear_pool(id): fake_clear_pool.called = True @@ -3108,8 +3108,8 @@ def fake_clear_pool(id): pool_states.KEY: pool_states.ACTIVE}, matchers.DictMatches(result['metadetails'])) - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. + def test_remote_main_non_empty_pool(self): + # Ensure AggregateError is raised if removing the main. aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -3219,7 +3219,7 @@ def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore): self.compute.add_aggregate_host, self.context, host="fake_host", aggregate=self.aggr, - slave_info=None) + subordinate_info=None) self.assertEqual(self.aggr.metadata[pool_states.KEY], pool_states.ERROR) self.assertEqual(self.aggr.hosts, ['fake_host']) @@ -3230,16 +3230,16 @@ def __init__(self): self._mock_calls = [] def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): + host_param, host, subordinate_info): self._mock_calls.append(( self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) + host_param, host, subordinate_info)) def remove_aggregate_host(self, ctxt, aggregate_id, host_param, - host, slave_info): + host, subordinate_info): self._mock_calls.append(( self.remove_aggregate_host, ctxt, aggregate_id, - host_param, host, slave_info)) + host_param, host, subordinate_info)) class StubDependencies(object): @@ -3254,10 +3254,10 @@ def _is_hv_pool(self, *_ignore): def _get_metadata(self, *_ignore): return { pool_states.KEY: {}, - 'master_compute': 'master' + 'main_compute': 'main' } - def _create_slave_info(self, *ignore): + def _create_subordinate_info(self, *ignore): return "SLAVE_INFO" @@ -3271,33 +3271,33 @@ class HypervisorPoolTestCase(test.NoDBTestCase): 'id': 98, 'hosts': [], 'metadata': { - 'master_compute': 'master', + 'main_compute': 'main', pool_states.POOL_FLAG: '', pool_states.KEY: '' } } fake_aggregate = objects.Aggregate(**fake_aggregate) - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_add_subordinate_to_pool(self): + subordinate = ResourcePoolWithStubs() - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.add_to_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, + (subordinate.compute_rpcapi.add_aggregate_host, "CONTEXT", jsonutils.to_primitive(self.fake_aggregate), - "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) - def 
test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_remove_subordinate_from_pool(self): + subordinate = ResourcePoolWithStubs() - slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", 98, "slave", "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + (subordinate.compute_rpcapi.remove_aggregate_host, + "CONTEXT", 98, "subordinate", "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) class SwapXapiHostTestCase(test.NoDBTestCase): diff --git a/nova-13.0.0/nova/virt/libvirt/driver.py b/nova-13.0.0/nova/virt/libvirt/driver.py index 58dd46d..7874650 100644 --- a/nova-13.0.0/nova/virt/libvirt/driver.py +++ b/nova-13.0.0/nova/virt/libvirt/driver.py @@ -7011,7 +7011,7 @@ def _get_disk_over_committed_size_total(self): # in _update_available_resource method for calculating usages based # on instance utilization. local_instance_list = objects.InstanceList.get_by_filters( - ctx, filters, use_slave=True) + ctx, filters, use_subordinate=True) # Convert instance list to dictionary with instace uuid as key. local_instances = {inst.uuid: inst for inst in local_instance_list} diff --git a/nova-13.0.0/nova/virt/xenapi/client/session.py b/nova-13.0.0/nova/virt/xenapi/client/session.py index 8f277ff..f320a5d 100644 --- a/nova-13.0.0/nova/virt/xenapi/client/session.py +++ b/nova-13.0.0/nova/virt/xenapi/client/session.py @@ -92,7 +92,7 @@ def __init__(self, url, user, pw): import XenAPI self.XenAPI = XenAPI self._sessions = queue.Queue() - self.is_slave = False + self.is_subordinate = False exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " "(is the Dom0 disk full?)")) url = self._create_first_session(url, user, pw, exception) @@ -125,13 +125,13 @@ def _create_first_session(self, url, user, pw, exception): try: session = self._create_session_and_login(url, user, pw, exception) except self.XenAPI.Failure as e: - # if user and pw of the master are different, we're doomed! + # if user and pw of the main are different, we're doomed! 
if e.details[0] == 'HOST_IS_SLAVE': - master = e.details[1] - url = pool.swap_xapi_host(url, master) + main = e.details[1] + url = pool.swap_xapi_host(url, main) session = self._create_session_and_login(url, user, pw, exception) - self.is_slave = True + self.is_subordinate = True else: raise self._sessions.put(session) @@ -143,7 +143,7 @@ def _populate_session_pool(self, url, user, pw, exception): self._sessions.put(session) def _get_host_uuid(self): - if self.is_slave: + if self.is_subordinate: aggr = objects.AggregateList.get_by_host( context.get_admin_context(), CONF.host, key=pool_states.POOL_FLAG)[0] diff --git a/nova-13.0.0/nova/virt/xenapi/fake.py b/nova-13.0.0/nova/virt/xenapi/fake.py index 3b04d6a..c51160f 100644 --- a/nova-13.0.0/nova/virt/xenapi/fake.py +++ b/nova-13.0.0/nova/virt/xenapi/fake.py @@ -106,7 +106,7 @@ def create_host(name_label, hostname='fake_name', address='fake_addr'): # Create a pool if we don't have one already if len(_db_content['pool']) == 0: pool_ref = _create_pool('') - _db_content['pool'][pool_ref]['master'] = host_ref + _db_content['pool'][pool_ref]['main'] = host_ref _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref @@ -871,7 +871,7 @@ def __getattr__(self, name): return self._session elif name == 'xenapi': return _Dispatcher(self.xenapi_request, None) - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith('login') or name.startswith('subordinate_local'): return lambda *params: self._login(name, params) elif name.startswith('Async'): return lambda *params: self._async(name, params) diff --git a/nova-13.0.0/nova/virt/xenapi/pool.py b/nova-13.0.0/nova/virt/xenapi/pool.py index de09337..e2cb16c 100644 --- a/nova-13.0.0/nova/virt/xenapi/pool.py +++ b/nova-13.0.0/nova/virt/xenapi/pool.py @@ -66,7 +66,7 @@ def undo_aggregate_operation(self, context, op, aggregate, 'state during operation on %(host)s'), {'aggregate_id': aggregate.id, 'host': host}) - def add_to_aggregate(self, context, aggregate, host, slave_info=None): + def add_to_aggregate(self, context, aggregate, host, subordinate_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate.metadata): return @@ -83,38 +83,38 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None): if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate.hosts) == 1: - # this is the first host of the pool -> make it master + # this is the first host of the pool -> make it main self._init_pool(aggregate.id, aggregate.name) - # save metadata so that we can find the master again - metadata = {'master_compute': host, + # save metadata so that we can find the main again + metadata = {'main_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> do a pool-join - # To this aim, nova compute on the slave has to go down. + main_compute = aggregate.metadata['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> do a pool-join + # To this aim, nova compute on the subordinate has to go down. 
# NOTE: it is assumed that ONLY nova compute is running now - self._join_slave(aggregate.id, host, - slave_info.get('compute_uuid'), - slave_info.get('url'), slave_info.get('user'), - slave_info.get('passwd')) - metadata = {host: slave_info.get('xenhost_uuid'), } + self._join_subordinate(aggregate.id, host, + subordinate_info.get('compute_uuid'), + subordinate_info.get('url'), subordinate_info.get('user'), + subordinate_info.get('passwd')) + metadata = {host: subordinate_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) - elif master_compute and master_compute != host: - # send rpc cast to master, asking to add the following + elif main_compute and main_compute != host: + # send rpc cast to main, asking to add the following # host with specified credentials. - slave_info = self._create_slave_info() + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.add_aggregate_host( - context, aggregate, host, master_compute, slave_info) + context, aggregate, host, main_compute, subordinate_info) - def remove_from_aggregate(self, context, aggregate, host, slave_info=None): + def remove_from_aggregate(self, context, aggregate, host, subordinate_info=None): """Remove a compute host from an aggregate.""" - slave_info = slave_info or dict() + subordinate_info = subordinate_info or dict() if not pool_states.is_hv_pool(aggregate.metadata): return @@ -126,19 +126,19 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): aggregate_id=aggregate.id, reason=invalid[aggregate.metadata[pool_states.KEY]]) - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> instruct it to eject a host from the pool + main_compute = aggregate.metadata['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> instruct it to eject a host from the pool host_uuid = aggregate.metadata[host] - self._eject_slave(aggregate.id, - slave_info.get('compute_uuid'), host_uuid) + self._eject_subordinate(aggregate.id, + subordinate_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) - elif master_compute == host: - # Remove master from its own pool -> destroy pool only if the - # master is on its own, otherwise raise fault. Destroying a - # pool made only by master is fictional + elif main_compute == host: + # Remove main from its own pool -> destroy pool only if the + # main is on its own, otherwise raise fault. Destroying a + # pool made only by main is fictional if len(aggregate.hosts) > 1: - # NOTE: this could be avoided by doing a master + # NOTE: this could be avoided by doing a main # re-election, but this is simpler for now. 
raise exception.InvalidAggregateActionDelete( aggregate_id=aggregate.id, @@ -146,32 +146,32 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): 'from the pool; pool not empty') % host) self._clear_pool(aggregate.id) - aggregate.update_metadata({'master_compute': None, host: None}) - elif master_compute and master_compute != host: - # A master exists -> forward pool-eject request to master - slave_info = self._create_slave_info() + aggregate.update_metadata({'main_compute': None, host: None}) + elif main_compute and main_compute != host: + # A main exists -> forward pool-eject request to main + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.remove_aggregate_host( - context, aggregate.id, host, master_compute, slave_info) + context, aggregate.id, host, main_compute, subordinate_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate.id, action='remove_from_aggregate', reason=_('Unable to eject %s ' - 'from the pool; No master found') + 'from the pool; No main found') % host) - def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): - """Joins a slave into a XenServer resource pool.""" + def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd): + """Joins a subordinate into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), - 'master_addr': self._host_addr, - 'master_user': CONF.xenserver.connection_username, - 'master_pass': CONF.xenserver.connection_password, } + 'main_addr': self._host_addr, + 'main_user': CONF.xenserver.connection_username, + 'main_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error(_LE("Pool-Join failed: %s"), e) @@ -180,8 +180,8 @@ def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): reason=_('Unable to join %s ' 'in the pool') % host) - def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): - """Eject a slave from a XenServer resource pool.""" + def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid): + """Eject a subordinate from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution @@ -220,7 +220,7 @@ def _clear_pool(self, aggregate_id): action='remove_from_aggregate', reason=six.text_type(e.details)) - def _create_slave_info(self): + def _create_subordinate_info(self): """XenServer specific info needed to join the hypervisor pool.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi diff --git a/nova-13.0.0/nova/virt/xenapi/pool_states.py b/nova-13.0.0/nova/virt/xenapi/pool_states.py index ae431dd..f4acdf5 100644 --- a/nova-13.0.0/nova/virt/xenapi/pool_states.py +++ b/nova-13.0.0/nova/virt/xenapi/pool_states.py @@ -25,7 +25,7 @@ A 'created' pool becomes 'changing' during the first request of adding a host. During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool -without any potential race condition that may incur in master/slave-based +without any potential race condition that may incur in main/subordinate-based configurations. The pool goes into the 'active' state when the underlying pool has been correctly instantiated. 
All other operations (e.g. add/remove hosts) that succeed will keep the diff --git a/nova-13.0.0/releasenotes/source/conf.py b/nova-13.0.0/releasenotes/source/conf.py index c402c61..e2bbec3 100644 --- a/nova-13.0.0/releasenotes/source/conf.py +++ b/nova-13.0.0/releasenotes/source/conf.py @@ -39,8 +39,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. project = u'Nova Release Notes' diff --git a/nova-13.0.0/tools/db/schema_diff.py b/nova-13.0.0/tools/db/schema_diff.py index 018569f..89a7d15 100644 --- a/nova-13.0.0/tools/db/schema_diff.py +++ b/nova-13.0.0/tools/db/schema_diff.py @@ -33,16 +33,16 @@ MYSQL: ./tools/db/schema_diff.py mysql+pymysql://root@localhost \ - master:latest my_branch:82 + main:latest my_branch:82 POSTGRESQL: ./tools/db/schema_diff.py postgresql://localhost \ - master:latest my_branch:82 + main:latest my_branch:82 DB2: ./tools/db/schema_diff.py ibm_db_sa://localhost \ - master:latest my_branch:82 + main:latest my_branch:82 """ from __future__ import print_function @@ -254,12 +254,12 @@ def parse_options(): try: orig_branch, orig_version = sys.argv[2].split(':') except IndexError: - usage('original branch and version required (e.g. master:82)') + usage('original branch and version required (e.g. main:82)') try: new_branch, new_version = sys.argv[3].split(':') except IndexError: - usage('new branch and version required (e.g. master:82)') + usage('new branch and version required (e.g. main:82)') return db_url, orig_branch, orig_version, new_branch, new_version
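
Several of the hunks above exercise nova's select_db_reader_mode decorator, where a use_subordinate=True call is routed to the asynchronously replicated reader while the default path uses the synchronous reader. The following is a minimal, self-contained sketch of that idea only; the decorator, the engine-name placeholders, and the query function are illustrative stand-ins, not oslo.db's or nova's actual implementation.

# Illustrative sketch only -- not part of the nova-13.0.0 patch above and not
# the real oslo.db/nova code. It mirrors the behaviour the test_db_api hunks
# assert: use_subordinate=True selects the async reader, otherwise the sync one.
import functools

_SYNC_READER = 'sync_reader'      # assumed placeholder for the sync engine
_ASYNC_READER = 'async_reader'    # assumed placeholder for the async engine


def select_reader_mode(func):
    """Toy stand-in for a use_subordinate-aware reader-mode selector."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        use_subordinate = kwargs.pop('use_subordinate', False)
        mode = _ASYNC_READER if use_subordinate else _SYNC_READER
        # A real implementation would open a database session bound to `mode`
        # here; the sketch simply forwards the chosen mode to the wrapped
        # function so the routing decision is visible.
        return func(*args, reader_mode=mode, **kwargs)
    return wrapper


@select_reader_mode
def get_instances(context, filters, reader_mode=None):
    return 'querying %s with %s' % (filters, reader_mode)


if __name__ == '__main__':
    print(get_instances('ctxt', {'host': 'node1'}))                        # sync reader
    print(get_instances('ctxt', {'host': 'node1'}, use_subordinate=True))  # async reader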