new: usr: support for creating spark clusters. Pull Request #103.
Prasanna Santhanam authored and Rohit Agarwal committed May 11, 2015
1 parent ecf99b4 commit 7305d5c
Showing 3 changed files with 48 additions and 2 deletions.
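The commit wires a single boolean end to end: a new `--use-spark` flag on `qds.py cluster create` (exercised by the tests below) and a matching `use_spark` kwarg on `ClusterInfo.set_hadoop_settings`. Below is a minimal sketch of the SDK-side usage, assuming this SDK version's `ClusterInfo` constructor (label, access key id, secret key) and the usual `Qubole.configure`/`Cluster.create` entry points; the token, label, and key values are placeholders:

```python
from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster, ClusterInfo

# Placeholder token; authenticate against the Qubole API first.
Qubole.configure(api_token="YOUR_API_TOKEN")

# Constructor arguments (label, access key id, secret key) are illustrative.
cluster_info = ClusterInfo("spark-cluster", "aki", "sak")

# The new kwarg introduced by this commit: start Spark on the cluster.
cluster_info.set_hadoop_settings(use_spark=True)

# POSTs to /clusters with {'hadoop_settings': {'use_spark': True}} in the
# body, matching the payload asserted in test_use_spark below.
Cluster.create(cluster_info.minimal_payload())
```

The CLI equivalent, taken straight from the new test: `qds.py cluster create --label test_label --access-key-id aki --secret-access-key sak --use-spark`.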
bin/qds.py (2 additions, 1 deletion)
@@ -233,7 +233,8 @@ def _create_cluster_info(arguments):
                                      arguments.slave_request_type,
                                      arguments.use_hbase,
                                      arguments.custom_ec2_tags,
-                                     arguments.use_hadoop2)
+                                     arguments.use_hadoop2,
+                                     arguments.use_spark)
 
     cluster_info.set_spot_instance_settings(
         arguments.maximum_bid_price_percentage,
qds_sdk/cluster.py (10 additions, 1 deletion)
@@ -215,6 +215,11 @@ def _parse_create_update(cls, args, action):
action="store_false",
default=None,
help="Use hadoop1 instead of hadoop2. This is the default.")
hadoop2.add_argument("--use-spark",
dest="use_spark",
action="store_true",
default=None,
help="Turn on spark for this cluster")

spot_group = argparser.add_argument_group("spot instance settings" +
" (valid only when slave-request-type is hybrid or spot)")
@@ -601,7 +606,8 @@ def set_hadoop_settings(self, master_instance_type=None,
                             slave_request_type=None,
                             use_hbase=None,
                             custom_ec2_tags=None,
-                            use_hadoop2=None):
+                            use_hadoop2=None,
+                            use_spark=None):
         """
         Kwargs:
@@ -624,6 +630,8 @@ def set_hadoop_settings(self, master_instance_type=None,
             `use_hbase`: Start hbase daemons on the cluster. Uses Hadoop2
+            `use_hadoop2`: Use hadoop2 in this cluster
+            `use_spark`: Use spark in this cluster
         """
         self.hadoop_settings['master_instance_type'] = master_instance_type
         self.hadoop_settings['slave_instance_type'] = slave_instance_type
@@ -633,6 +641,7 @@ def set_hadoop_settings(self, master_instance_type=None,
         self.hadoop_settings['slave_request_type'] = slave_request_type
         self.hadoop_settings['use_hbase'] = use_hbase
         self.hadoop_settings['use_hadoop2'] = use_hadoop2
+        self.hadoop_settings['use_spark'] = use_spark
 
         if custom_ec2_tags and custom_ec2_tags.strip():
             try:
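Every kwarg of `set_hadoop_settings` defaults to None, and the payload asserted in `test_use_spark` contains only `{'use_spark': True}` under `hadoop_settings`, so None-valued keys are evidently stripped when the request body is built. A small sketch of the stored state, with illustrative constructor arguments as before:

```python
from qds_sdk.cluster import ClusterInfo

cluster_info = ClusterInfo("spark-cluster", "aki", "sak")  # illustrative values
cluster_info.set_hadoop_settings(use_spark=True)

# The dict keeps every key; unset ones stay None and drop out of the payload.
assert cluster_info.hadoop_settings["use_spark"] is True
assert cluster_info.hadoop_settings["use_hadoop2"] is None
```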
tests/test_cluster.py (36 additions, 0 deletions)
@@ -673,6 +673,42 @@ def test_use_hadoop1(self):
             }
         })
 
+    def test_use_spark(self):
+        sys.argv = ['qds.py', 'cluster', 'create', '--label', 'test_label',
+                    '--access-key-id', 'aki', '--secret-access-key', 'sak',
+                    '--use-spark']
+        print_command()
+        Connection._api_call = Mock(return_value={})
+        qds.main()
+        Connection._api_call.assert_called_with('POST', 'clusters',
+                {'cluster':
+                    {'label': ['test_label'],
+                     'ec2_settings': {'compute_secret_key': 'sak',
+                                      'compute_access_key': 'aki'},
+                     'hadoop_settings': {'use_spark': True}
+                    }
+                })
+
+    @unittest.skipIf(sys.version_info < (2, 7, 0), "Known failure on Python 2.6")
+    def test_use_spark_on_hadoop2(self):
+        sys.argv = ['qds.py', 'cluster', 'create', '--label', 'test_label',
+                    '--access-key-id', 'aki', '--secret-access-key', 'sak',
+                    '--use-spark', '--use-hadoop2']
+        print_command()
+        Connection._api_call = Mock(return_value={})
+        with self.assertRaises(SystemExit):
+            qds.main()
+
+    @unittest.skipIf(sys.version_info < (2, 7, 0), "Known failure on Python 2.6")
+    def test_use_spark_on_hadoop1(self):
+        sys.argv = ['qds.py', 'cluster', 'create', '--label', 'test_label',
+                    '--access-key-id', 'aki', '--secret-access-key', 'sak',
+                    '--use-spark', '--use-hadoop1']
+        print_command()
+        Connection._api_call = Mock(return_value={})
+        with self.assertRaises(SystemExit):
+            qds.main()
+
     @unittest.skipIf(sys.version_info < (2, 7, 0), "Known failure on Python 2.6")
     def test_conflict_hadoop1_hadoop2(self):
         sys.argv = ['qds.py', 'cluster', 'create', '--label', 'test_label',
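The three new tests reuse the file's established pattern: stub the HTTP transport, drive the real CLI entry point through `sys.argv`, then either assert on the exact POST body or expect argparse to abort. A condensed sketch of that pattern, assuming the suite's imports (`qds`, `Connection`, the py2-era `mock` package) as used in the surrounding file:

```python
import sys
from mock import Mock                      # as in the existing test module

import qds                                 # bin/qds.py, importable in the suite
from qds_sdk.connection import Connection

def run_cli(argv):
    """Run the CLI with a stubbed transport and return the recorded call."""
    sys.argv = argv
    Connection._api_call = Mock(return_value={})
    qds.main()
    return Connection._api_call.call_args

(args, _kwargs) = run_cli(['qds.py', 'cluster', 'create', '--label', 'test_label',
                           '--access-key-id', 'aki', '--secret-access-key', 'sak',
                           '--use-spark'])
assert args[0] == 'POST' and args[1] == 'clusters'
assert args[2]['cluster']['hadoop_settings'] == {'use_spark': True}
```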
