-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathVagrantfile
424 lines (329 loc) · 11.6 KB
/
Vagrantfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
# -*- mode: ruby -*-
# vi: set ft=ruby :
# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure(2) do |config|
  # Shell provisioning script for the nova-docker box: installs OpenStack Juno
  # via packstack, then replaces the default Neutron backend with MidoNet
  # (Zookeeper + Cassandra + Midolman + midonet-api under Tomcat).
  #
  # NOTE: single-quoted heredoc (<<'EOF') — Ruby performs no interpolation here;
  # every $VAR below is expanded (or deliberately left unexpanded) by the shell.
  $provisioning_nova_docker = <<'EOF'
#
# Disable Network Manager
#
systemctl mask NetworkManager
systemctl stop NetworkManager
cat > /etc/sysconfig/network-scripts/ifcfg-enp0s8 << MIDO_EOF
DEVICE=enp0s8
BOOTPROTO=dhcp
NM_CONTROLLED=no
MIDO_EOF
cat > /etc/sysconfig/network-scripts/ifcfg-enp0s9 << MIDO_EOF
DEVICE=enp0s9
BOOTPROTO=dhcp
NM_CONTROLLED=no
MIDO_EOF
systemctl enable network.service
systemctl start network.service
#
# Repos
#
rpm -ivh http://yum.puppetlabs.com/puppetlabs-release-el-7.noarch.rpm
yum makecache fast
# packstack
yum install -y ntpdate
ntpdate pool.ntp.org
yum install -y https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm
yum install -y epel-release
#rm -f /var/lib/rpm/__*
#rpm --rebuilddb -v -v
# Midonet
cat >> /etc/yum.repos.d/midonet.repo << EOF_MIDO
[midonet]
name=MidoNet
baseurl=http://bcn4.bcn.midokura.com:8081/artifactory/midonet/el7/master/nightly/all/noarch/
enabled=1
gpgcheck=1
gpgkey=http://bcn4.bcn.midokura.com:8081/artifactory/api/gpg/key/public
[midonet-openstack-integration]
name=MidoNet OpenStack Integration
baseurl=http://bcn4.bcn.midokura.com:8081/artifactory/midonet/el7/master/nightly/juno/noarch/
enabled=1
gpgcheck=1
gpgkey=http://bcn4.bcn.midokura.com:8081/artifactory/api/gpg/key/public
[midonet-misc]
name=MidoNet 3rd Party Tools and Libraries
baseurl=http://bcn4.bcn.midokura.com:8081/artifactory/midonet/el7/thirdparty/stable/all/x86_64/
enabled=1
gpgcheck=1
gpgkey=http://bcn4.bcn.midokura.com:8081/artifactory/api/gpg/key/public
EOF_MIDO
# Cassandra
cat >> /etc/yum.repos.d/cassandra.repo << EOF_MIDO
[datastax]
name= DataStax Repo for Apache Cassandra
baseurl=http://rpm.datastax.com/community
enabled=1
gpgcheck=0
EOF_MIDO
cat >> /etc/resolv.conf << EOF_MIDO
nameserver 8.8.8.8
nameserver 8.8.4.4
EOF_MIDO
# Updating and installing dependencies
#
yum update -y
#cp /etc/pki/tls/certs/ca-bundle.crt /root/
#curl http://curl.haxx.se/ca/cacert.pem -o /etc/pki/tls/certs/ca-bundle.crt
yum update -y ca-certificates
# Tools
yum install -y augeas crudini screen wget
#
# Packstack
#
#
wget https://bootstrap.pypa.io/get-pip.py
python get-pip.py
yum install -y python-devel
yum install -y gcc libgcc glibc libffi-devel libxml2-devel libxslt-devel openssl-devel zlib-devel bzip2-devel ncurses-devel
pip install cryptography
pip install requests[security]
systemctl stop firewalld
systemctl mask firewalld
yum install -y iptables-services
systemctl enable iptables
# Must match the first private_network IP configured in the Vagrantfile below.
IP=192.168.124.185
yum install -y openstack-packstack
packstack --install-hosts=$IP \
--nagios-install=n \
--os-swift-install=n \
--os-ceilometer-install=n \
--os-cinder-install=n \
--os-glance-install=n \
--os-heat-install=n \
--os-horizon-install=n \
--os-nova-install=n \
--provision-demo=n
rc=$?; if [[ $rc != 0 ]]; then exit $rc; fi
#
# Disable unneeded remaining services
#
yum remove -y openstack-neutron-openvswitch
systemctl stop openvswitch
systemctl mask openvswitch
systemctl stop neutron-l3-agent
systemctl mask neutron-l3-agent
ADMIN_TOKEN=$(crudini --get /etc/keystone/keystone.conf DEFAULT admin_token)
ADMIN_PASSWORD=$(grep "OS_PASSWORD" /root/keystonerc_admin | sed -e 's/"//g' | cut -f2 -d'=')
NEUTRON_DBPASS=$(crudini --get /root/packstack-answers-* general CONFIG_NEUTRON_DB_PW)
cp /root/keystonerc_admin /home/vagrant/
# Only the vagrant user's copy changes owner; root keeps its own credentials file.
chown vagrant:vagrant /home/vagrant/keystonerc_admin
#
# Zookeeper
#
yum install -y java-1.7.0-openjdk-headless zookeeper
# Zookeeper expects the JRE to be found in the /usr/java/default/bin/ directory
# so if it is in a different location, you must create a symbolic link pointing
# to that location. To do so run the 2 following commands:
mkdir -p /usr/java/default/bin/
ln -s /usr/lib/jvm/jre-1.7.0-openjdk/bin/java /usr/java/default/bin/java
# Next we need to create the zookeeper data directory and assign permissions:
mkdir /var/lib/zookeeper/data
chmod 777 /var/lib/zookeeper/data
# Now we can edit the Zookeeper configuration file. We need to add the servers
# (in a prod installation you would have more than one zookeeper server in a
# cluster. For this example we are only using one. ). Edit the Zookeeper config
# file at /etc/zookeeper/zoo.cfg and add the following to the bottom of the
# file:
echo "server.1=$IP:2888:3888" >> /etc/zookeeper/zoo.cfg
# We need to set the Zookeeper ID on this server:
echo 1 > /var/lib/zookeeper/data/myid
systemctl enable zookeeper.service
systemctl start zookeeper.service
#
# Cassandra
#
yum install -y dsc20
sed -i -e "s/cluster_name:.*/cluster_name: 'midonet'/" /etc/cassandra/conf/cassandra.yaml
sed -i -e "s/listen_address:.*/listen_address: $IP/" /etc/cassandra/conf/cassandra.yaml
sed -i -e "s/seeds:.*/seeds: \"$IP\"/" /etc/cassandra/conf/cassandra.yaml
sed -i -e "s/rpc_address:.*/rpc_address: $IP/" /etc/cassandra/conf/cassandra.yaml
rm -rf /var/lib/cassandra/data/system
systemctl enable cassandra.service
systemctl start cassandra.service
#
# Midolman
#
yum install -y midolman
systemctl enable midolman.service
systemctl start midolman.service
#
# Midonet client
#
yum install -y python-midonetclient
#
# Midonet-api
#
yum install -y midonet-api
# Small inline python program that sets the proper xml values to midonet-api's
# web.xml. The heredoc delimiter is unquoted, so $IP and $ADMIN_TOKEN are
# expanded by the shell before python sees the script.
cat << MIDO_EOF | python -
from xml.dom import minidom

DOCUMENT_PATH = '/usr/share/midonet-api/WEB-INF/web.xml'

def set_value(param_node, value):
    # Replace the <param-value> sibling of the given <param-name> node.
    value_node = param_node.parentNode.getElementsByTagName('param-value')[0]
    value_node.childNodes[0].data = value

doc = minidom.parse(DOCUMENT_PATH)
params = doc.getElementsByTagName('param-name')
for param_node in params:
    if param_node.childNodes[0].data == 'rest_api-base_uri':
        set_value(param_node, 'http://$IP:8081/midonet-api')
    elif param_node.childNodes[0].data == 'keystone-service_host':
        set_value(param_node, '$IP')
    elif param_node.childNodes[0].data == 'keystone-admin_token':
        set_value(param_node, '$ADMIN_TOKEN')
    elif param_node.childNodes[0].data == 'zookeeper-zookeeper_hosts':
        set_value(param_node, '$IP:2181')
with open(DOCUMENT_PATH, 'w') as f:
    f.write(doc.toprettyxml())
MIDO_EOF
yum install -y tomcat
cat << MIDO_EOF | augtool -L
set /augeas/load/Shellvars/incl[last()+1] /etc/tomcat/tomcat.conf
load
set /files/etc/tomcat/tomcat.conf/CONNECTOR_PORT 8081
save
MIDO_EOF
cat << MIDO_EOF | sudo augtool -L
set /augeas/load/Xml/incl[last()+1] /etc/tomcat/server.xml
load
set /files/etc/tomcat/server.xml/Server/Service/Connector[1]/#attribute/port 8081
save
MIDO_EOF
cat << MIDO_EOF > /etc/tomcat/Catalina/localhost/midonet-api.xml
<Context
path="/midonet-api"
docBase="/usr/share/midonet-api"
antiResourceLocking="false"
privileged="true"
/>
MIDO_EOF
systemctl enable tomcat.service
systemctl start tomcat.service
#
# Midonet-cli
#
#
yum install -y python-midonetclient
cat > ~/.midonetrc << MIDO_EOF
[cli]
api_url = http://$IP:8081/midonet-api
username = admin
password = $ADMIN_PASSWORD
project_id = admin
MIDO_EOF
cp /root/.midonetrc /home/vagrant/
# Make the copied CLI config usable by the vagrant user.
chown vagrant:vagrant /home/vagrant/.midonetrc
# Create a tunnel zone
TUNNEL_ZONE=$(midonet-cli -e tunnel-zone create name gre type gre)
HOST_UUID=$(midonet-cli -e list host | awk '{print $2;}')
midonet-cli -e tunnel-zone $TUNNEL_ZONE add member host $HOST_UUID address $IP
#
# Keystone Integration
#
source ~/keystonerc_admin
keystone service-create --name midonet --type midonet --description "Midonet API Service"
keystone user-create --name midonet --pass midonet --tenant admin
keystone user-role-add --user midonet --role admin --tenant admin
#
# Neutron integration
#
yum install -y openstack-neutron python-neutron-plugin-midonet
crudini --set /etc/neutron/neutron.conf DEFAULT core_plugin midonet.neutron.plugin.MidonetPluginV2
mkdir /etc/neutron/plugins/midonet
cat > /etc/neutron/plugins/midonet/midonet.ini << MIDO_EOF
[DATABASE]
sql_connection = mysql://neutron:$NEUTRON_DBPASS@$IP/neutron
sql_max_retries = 100
[MIDONET]
# MidoNet API URL
midonet_uri = http://$IP:8081/midonet-api
# MidoNet administrative user in Keystone
username = midonet
password = midonet
# MidoNet administrative user's tenant
project_id = admin
auth_url = http://$IP:5000/v2.0
MIDO_EOF
rm -f /etc/neutron/plugin.ini
ln -s /etc/neutron/plugins/midonet/midonet.ini /etc/neutron/plugin.ini
# Comment out the service_plugins definitions
crudini --set /etc/neutron/neutron.conf DEFAULT service_plugins neutron.services.firewall.fwaas_plugin.FirewallPlugin
sed -i -e 's/^router_scheduler_driver/#router_scheduler_driver/' /etc/neutron/neutron.conf
# dhcp agent config
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver neutron.agent.linux.interface.MidonetInterfaceDriver
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver midonet.neutron.agent.midonet_driver.DhcpNoOpDriver
crudini --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata True
crudini --set /etc/neutron/dhcp_agent.ini MIDONET midonet_uri http://$IP:8081/midonet-api
crudini --set /etc/neutron/dhcp_agent.ini MIDONET username midonet
crudini --set /etc/neutron/dhcp_agent.ini MIDONET password midonet
systemctl restart neutron-server
systemctl restart neutron-dhcp-agent
systemctl restart neutron-metadata-agent
#
# Create internal network for usage on the instances
#
neutron net-create foo
neutron subnet-create foo 172.16.1.0/24 --name foo
echo "FINISHED"
# Install the stackforge nova-docker driver (Juno branch) plus the rootwrap
# filters nova-compute needs to drive docker. Currently only invoked from the
# commented-out docker section below.
function install_nova_docker_with_midonet() {
    yum install -y git python-pip
    git clone https://review.openstack.org/stackforge/nova-docker
    pushd nova-docker
    git checkout origin/stable/juno
    git cherry-pick 06dabc0aecf95003e2558da3899ceed43367c237
    pip install pbr
    python setup.py install --record /root/nova_docker_installed_files.txt
    mkdir -p /etc/nova/rootwrap.d
    cp etc/nova/rootwrap.d/docker.filters /etc/nova/rootwrap.d/
    popd
}
# Append "docker" to glance's container_formats (idempotent).
function configure_glance_for_docker() {
    source ~/keystonerc_admin
    local glance_formats=$(crudini --get /etc/glance/glance-api.conf DEFAULT container_formats)
    local glance_with_docker=$(case "$glance_formats" in *docker* ) echo "$glance_formats";; * ) echo "$glance_formats,docker";; esac)
    crudini --set /etc/glance/glance-api.conf DEFAULT container_formats "$glance_with_docker"
    systemctl restart openstack-glance-api
}
# Switch nova-compute to the docker virt driver.
function configure_nova_for_docker() {
    crudini --set /etc/nova/nova.conf DEFAULT compute_driver novadocker.virt.docker.DockerDriver
    systemctl restart openstack-nova-compute
}
#
#Setting docker up
#
#yum install -y docker
# add nova compute to the docker group so it can use its socket
#gpasswd -a nova docker
#systemctl enable docker
#systemctl start docker
#configure_glance_for_docker
# Add docker's cirros to glance
#docker pull cirros
#docker save cirros | glance image-create --is-public=True --container-format=docker --disk-format=raw --name cirros
#install_nova_docker_with_midonet
#configure_nova_for_docker
# Define more appropriately sized instance for cirros containers
#nova flavor-create "m1.nano" auto 64 0 1
EOF
  config.vm.box = "centos7"
  # The guest does its own networking setup; don't mount the host folder.
  config.vm.synced_folder ".", "/vagrant", disabled: true
  config.vm.define :nova_docker do |nova_docker|
    nova_docker.vm.hostname = "nova-docker.local"
    # First address must match IP=192.168.124.185 inside the provisioning script.
    nova_docker.vm.network :private_network, ip: "192.168.124.185"
    nova_docker.vm.network :private_network, ip: "192.168.124.186"
    nova_docker.vm.provision "shell",
      inline: $provisioning_nova_docker
    nova_docker.vm.provider :virtualbox do |vb|
      vb.memory = 4096
      vb.cpus = 2
    end
  end
end