Removing the master and slave jargon #1

Open

wants to merge 1 commit into master

4 changes: 2 additions & 2 deletions nova-13.0.0/api-guide/source/conf.py
@@ -47,8 +47,8 @@
# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'
# The main toctree document.
main_doc = 'index'
# General information about the project.
project = u'Compute API Guide'
bug_tag = u'api-guide'

4 changes: 2 additions & 2 deletions nova-13.0.0/doc/source/conf.py
@@ -49,8 +49,8 @@
# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'
# The main toctree document.
main_doc = 'index'

# General information about the project.
project = u'nova'

70 changes: 35 additions & 35 deletions nova-13.0.0/nova/compute/manager.py
@@ -798,7 +798,7 @@ def _get_instances_on_driver(self, context, filters=None):
return objects.InstanceList()
filters['uuid'] = driver_uuids
local_instances = objects.InstanceList.get_by_filters(
context, filters, use_slave=True)
context, filters, use_subordinate=True)
return local_instances
except NotImplementedError:
pass
@@ -807,7 +807,7 @@ def _get_instances_on_driver(self, context, filters=None):
# to brute force.
driver_instances = self.driver.list_instances()
instances = objects.InstanceList.get_by_filters(context, filters,
use_slave=True)
use_subordinate=True)
name_map = {instance.name: instance for instance in instances}
local_instances = []
for driver_instance in driver_instances:
@@ -1530,7 +1530,7 @@ def _check_instance_build_time(self, context):
'host': self.host}

building_insts = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
filters, expected_attrs=[], use_subordinate=True)

for instance in building_insts:
if timeutils.is_older_than(instance.created_at, timeout):
@@ -1809,7 +1809,7 @@ def _sync_scheduler_instance_info(self, context):
context = context.elevated()
instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
use_subordinate=True)
uuids = [instance.uuid for instance in instances]
self.scheduler_client.sync_instance_info(context, self.host, uuids)

@@ -5739,7 +5739,7 @@ def _heal_instance_info_cache(self, context):
# The list of instances to heal is empty so rebuild it
LOG.debug('Rebuilding the list of instances to heal')
db_instances = objects.InstanceList.get_by_host(
context, self.host, expected_attrs=[], use_slave=True)
context, self.host, expected_attrs=[], use_subordinate=True)
for inst in db_instances:
# We don't want to refresh the cache for instances
# which are building or deleting so don't put them
@@ -5770,7 +5770,7 @@ def _heal_instance_info_cache(self, context):
context, instance_uuids.pop(0),
expected_attrs=['system_metadata', 'info_cache',
'flavor'],
use_slave=True)
use_subordinate=True)
except exception.InstanceNotFound:
# Instance is gone. Try to grab another.
continue
@@ -5821,7 +5821,7 @@ def _poll_rebooting_instances(self, context):
task_states.REBOOT_PENDING],
'host': self.host}
rebooting = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=[], use_slave=True)
context, filters, expected_attrs=[], use_subordinate=True)

to_poll = []
for instance in rebooting:
@@ -5838,7 +5838,7 @@ def _poll_rescued_instances(self, context):
'host': self.host}
rescued_instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=["system_metadata"],
use_slave=True)
use_subordinate=True)

to_unrescue = []
for instance in rescued_instances:
@@ -5856,7 +5856,7 @@ def _poll_unconfirmed_resizes(self, context):

migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
context, CONF.resize_confirm_window, self.host,
use_slave=True)
use_subordinate=True)

migrations_info = dict(migration_count=len(migrations),
confirm_window=CONF.resize_confirm_window)
@@ -5885,7 +5885,7 @@ def _set_migration_to_error(migration, reason, **kwargs):
try:
instance = objects.Instance.get_by_uuid(context,
instance_uuid, expected_attrs=expected_attrs,
use_slave=True)
use_subordinate=True)
except exception.InstanceNotFound:
reason = (_("Instance %s not found") %
instance_uuid)
@@ -5946,7 +5946,7 @@ def _poll_shelved_instances(self, context):
'host': self.host}
shelved_instances = objects.InstanceList.get_by_filters(
context, filters=filters, expected_attrs=['system_metadata'],
use_slave=True)
use_subordinate=True)

to_gc = []
for instance in shelved_instances:
@@ -5979,7 +5979,7 @@ def _instance_usage_audit(self, context):
context, begin, end, host=self.host,
expected_attrs=['system_metadata', 'info_cache', 'metadata',
'flavor'],
use_slave=True)
use_subordinate=True)
num_instances = len(instances)
errors = 0
successes = 0
@@ -6042,7 +6042,7 @@ def _poll_bandwidth_usage(self, context):

instances = objects.InstanceList.get_by_host(context,
self.host,
use_slave=True)
use_subordinate=True)
try:
bw_counters = self.driver.get_all_bw_counters(instances)
except NotImplementedError:
@@ -6067,7 +6067,7 @@ def _poll_bandwidth_usage(self, context):
last_ctr_out = None
usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=start_time, use_slave=True)
start_period=start_time, use_subordinate=True)
if usage:
bw_in = usage.bw_in
bw_out = usage.bw_out
@@ -6077,7 +6077,7 @@ def _poll_bandwidth_usage(self, context):
usage = (objects.BandwidthUsage.
get_by_instance_uuid_and_mac(
context, bw_ctr['uuid'], bw_ctr['mac_address'],
start_period=prev_time, use_slave=True))
start_period=prev_time, use_subordinate=True))
if usage:
last_ctr_in = usage.last_ctr_in
last_ctr_out = usage.last_ctr_out
@@ -6107,14 +6107,14 @@ def _poll_bandwidth_usage(self, context):
last_refreshed=refreshed,
update_cells=update_cells)

def _get_host_volume_bdms(self, context, use_slave=False):
def _get_host_volume_bdms(self, context, use_subordinate=False):
"""Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = objects.InstanceList.get_by_host(context, self.host,
use_slave=use_slave)
use_subordinate=use_subordinate)
for instance in instances:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=use_slave)
context, instance.uuid, use_subordinate=use_subordinate)
instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
compute_host_bdms.append(dict(instance=instance,
instance_bdms=instance_bdms))
@@ -6146,7 +6146,7 @@ def _poll_volume_usage(self, context):
return

compute_host_bdms = self._get_host_volume_bdms(context,
use_slave=True)
use_subordinate=True)
if not compute_host_bdms:
return

@@ -6172,7 +6172,7 @@ def _sync_power_states(self, context):
"""
db_instances = objects.InstanceList.get_by_host(context, self.host,
expected_attrs=[],
use_slave=True)
use_subordinate=True)

num_vm_instances = self.driver.get_num_instances()
num_db_instances = len(db_instances)
@@ -6231,14 +6231,14 @@ def _query_driver_power_state_and_sync(self, context, db_instance):
self._sync_instance_power_state(context,
db_instance,
vm_power_state,
use_slave=True)
use_subordinate=True)
except exception.InstanceNotFound:
# NOTE(hanlind): If the instance gets deleted during sync,
# silently ignore.
pass

def _sync_instance_power_state(self, context, db_instance, vm_power_state,
use_slave=False):
use_subordinate=False):
"""Align instance power state between the database and hypervisor.

If the instance is not found on the hypervisor, but is in the database,
Expand All @@ -6247,7 +6247,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,

# We re-query the DB to get the latest instance info to minimize
# (not eliminate) race condition.
db_instance.refresh(use_slave=use_slave)
db_instance.refresh(use_subordinate=use_subordinate)
db_power_state = db_instance.power_state
vm_state = db_instance.vm_state

@@ -6422,7 +6422,7 @@ def _reclaim_queued_deletes(self, context):
instances = objects.InstanceList.get_by_filters(
context, filters,
expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
use_slave=True)
use_subordinate=True)
for instance in instances:
if self._deleted_old_enough(instance, interval):
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -6447,7 +6447,7 @@ def update_available_resource(self, context):
new_resource_tracker_dict = {}

compute_nodes_in_db = self._get_compute_nodes_in_db(context,
use_slave=True)
use_subordinate=True)
nodenames = set(self.driver.get_available_nodes())
for nodename in nodenames:
rt = self._get_resource_tracker(nodename)
@@ -6480,10 +6480,10 @@ def update_available_resource(self, context):
LOG.info(_LI("Deleting orphan compute node %s"), cn.id)
cn.destroy()

def _get_compute_nodes_in_db(self, context, use_slave=False):
def _get_compute_nodes_in_db(self, context, use_subordinate=False):
try:
return objects.ComputeNodeList.get_all_by_host(context, self.host,
use_slave=use_slave)
use_subordinate=use_subordinate)
except exception.NotFound:
LOG.error(_LE("No compute node record for host %s"), self.host)
return []
@@ -6550,7 +6550,7 @@ def _cleanup_running_deleted_instances(self, context):
"DELETED but still present on host."),
instance.name, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid, use_slave=True)
context, instance.uuid, use_subordinate=True)
self.instance_events.clear_events_for_instance(instance)
try:
self._shutdown_instance(context, instance, bdms,
@@ -6618,11 +6618,11 @@ def _error_out_instance_on_exception(self, context, instance,
self._set_instance_obj_error_state(context, instance)

@wrap_exception()
def add_aggregate_host(self, context, aggregate, host, slave_info):
def add_aggregate_host(self, context, aggregate, host, subordinate_info):
"""Notify hypervisor of change (for hypervisor pools)."""
try:
self.driver.add_to_aggregate(context, aggregate, host,
slave_info=slave_info)
subordinate_info=subordinate_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'add_aggregate_host')
@@ -6634,11 +6634,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info):
aggregate, host)

@wrap_exception()
def remove_aggregate_host(self, context, host, slave_info, aggregate):
def remove_aggregate_host(self, context, host, subordinate_info, aggregate):
"""Removes a host from a physical hypervisor pool."""
try:
self.driver.remove_from_aggregate(context, aggregate, host,
slave_info=slave_info)
subordinate_info=subordinate_info)
except NotImplementedError:
LOG.debug('Hypervisor driver does not support '
'remove_aggregate_host')
@@ -6733,7 +6733,7 @@ def _run_image_cache_manager_pass(self, context):
'soft_deleted': True,
'host': nodes}
filtered_instances = objects.InstanceList.get_by_filters(context,
filters, expected_attrs=[], use_slave=True)
filters, expected_attrs=[], use_subordinate=True)

self.driver.manage_image_cache(context, filtered_instances)

@@ -6748,7 +6748,7 @@ def _run_pending_deletes(self, context):
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, filters, expected_attrs=attrs, use_slave=True)
context, filters, expected_attrs=attrs, use_subordinate=True)
LOG.debug('There are %d instances to clean', len(instances))

for instance in instances:
@@ -6792,7 +6792,7 @@ def _cleanup_incomplete_migrations(self, context):
attrs = ['info_cache', 'security_groups', 'system_metadata']
with utils.temporary_mutation(context, read_deleted='yes'):
instances = objects.InstanceList.get_by_filters(
context, inst_filters, expected_attrs=attrs, use_slave=True)
context, inst_filters, expected_attrs=attrs, use_subordinate=True)

for instance in instances:
for migration in migrations:

10 changes: 5 additions & 5 deletions nova-13.0.0/nova/compute/rpcapi.py
@@ -162,7 +162,7 @@ class ComputeAPI(object):

* 2.0 - Remove 1.x backwards compat
* 2.1 - Adds orig_sys_metadata to rebuild_instance()
* 2.2 - Adds slave_info parameter to add_aggregate_host() and
* 2.2 - Adds subordinate_info parameter to add_aggregate_host() and
remove_aggregate_host()
* 2.3 - Adds volume_id to reserve_block_device_name()
* 2.4 - Add bdms to terminate_instance
@@ -390,7 +390,7 @@ def get_client(self, target, version_cap, serializer):
serializer=serializer)

def add_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
subordinate_info=None):
'''Add aggregate host.

:param ctxt: request context
@@ -403,7 +403,7 @@ def add_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'add_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
subordinate_info=subordinate_info)

def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
version = '4.0'
@@ -758,7 +758,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
**extra)

def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
slave_info=None):
subordinate_info=None):
'''Remove aggregate host.

:param ctxt: request context
@@ -771,7 +771,7 @@ def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'remove_aggregate_host',
aggregate=aggregate, host=host_param,
slave_info=slave_info)
subordinate_info=subordinate_info)

def remove_fixed_ip_from_instance(self, ctxt, instance, address):
version = '4.0'
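
Renaming a keyword on RPC-facing methods such as add_aggregate_host() and remove_aggregate_host() means every caller has to switch to subordinate_info in the same change, or the old name has to be tolerated for a transition window. Below is a minimal, self-contained sketch of such a compatibility shim; it is purely illustrative, not part of this diff, and the names and placeholder values are invented for the example (Nova coordinates this kind of change through the versioned RPC API history shown above).

```python
import warnings


def add_aggregate_host(context, aggregate, host_param, host,
                       subordinate_info=None, **compat):
    """Accept the old keyword for one cycle so existing callers keep
    working while they migrate to subordinate_info."""
    if 'slave_info' in compat:
        warnings.warn("'slave_info' is deprecated; use 'subordinate_info'",
                      DeprecationWarning, stacklevel=2)
        subordinate_info = compat.pop('slave_info')
    if compat:
        raise TypeError('unexpected keyword arguments: %s' % sorted(compat))
    # ... the real method would cast to the compute host here ...
    return subordinate_info


# Placeholder values; old-style and new-style calls resolve identically:
assert add_aggregate_host(None, 'agg1', 'node1', 'node1',
                          slave_info={'pool': 'p1'}) == {'pool': 'p1'}
assert add_aggregate_host(None, 'agg1', 'node1', 'node1',
                          subordinate_info={'pool': 'p1'}) == {'pool': 'p1'}
```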

2 changes: 1 addition & 1 deletion nova-13.0.0/nova/conductor/rpcapi.py
@@ -126,7 +126,7 @@ class ConductorAPI(object):
* 1.62 - Added object_backport()
* 1.63 - Changed the format of values['stats'] from a dict to a JSON string
in compute_node_update()
* 1.64 - Added use_slave to instance_get_all_filters()
* 1.64 - Added use_subordinate to instance_get_all_filters()
- Remove instance_type_get()
- Remove aggregate_get()
- Remove aggregate_get_by_host()

2 changes: 1 addition & 1 deletion nova-13.0.0/nova/console/xvp.py
@@ -40,7 +40,7 @@
help='Generated XVP conf file'),
cfg.StrOpt('console_xvp_pid',
default='/var/run/xvp.pid',
help='XVP master process pid file'),
help='XVP main process pid file'),
cfg.StrOpt('console_xvp_log',
default='/var/log/xvp.log',
help='XVP log file'),

4 changes: 2 additions & 2 deletions nova-13.0.0/nova/db/api.py
@@ -97,8 +97,8 @@ def create_context_manager(connection):
def select_db_reader_mode(f):
"""Decorator to select synchronous or asynchronous reader mode.

The kwarg argument 'use_slave' defines reader mode. Asynchronous reader
will be used if 'use_slave' is True and synchronous reader otherwise.
The kwarg argument 'use_subordinate' defines reader mode. Asynchronous reader
will be used if 'use_subordinate' is True and synchronous reader otherwise.
"""
return IMPL.select_db_reader_mode(f)
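
The docstring above is the whole contract of the renamed flag: it only selects which database reader serves the call — the asynchronous (replica) reader when use_subordinate is true, the synchronous (primary) reader otherwise — before delegating to IMPL.select_db_reader_mode. A minimal, self-contained sketch of that behaviour follows; the reader contexts are stand-ins invented for the example, not Nova's actual implementation.

```python
import contextlib
import functools


# Stand-ins for the synchronous and asynchronous reader contexts that the
# real DB backend provides; they exist only to make the sketch runnable.
@contextlib.contextmanager
def _sync_reader(context):
    print('querying the primary database (synchronous reader)')
    yield


@contextlib.contextmanager
def _async_reader(context):
    print('querying a replica (asynchronous reader)')
    yield


def select_db_reader_mode(f):
    """Route the call through the async reader when use_subordinate=True."""
    @functools.wraps(f)
    def wrapper(context, *args, use_subordinate=False, **kwargs):
        reader = _async_reader if use_subordinate else _sync_reader
        with reader(context):
            return f(context, *args, **kwargs)
    return wrapper


@select_db_reader_mode
def instance_get_all_by_filters(context, filters):
    return []  # stand-in for the actual query


instance_get_all_by_filters(None, {}, use_subordinate=True)  # replica
instance_get_all_by_filters(None, {})                        # primary
```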
