#!/bin/bash -ex
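# Example invocation (illustrative only; the values are placeholders and in practice
# these variables are injected by the CI job environment before this script runs):
#   WORKSPACE=/path/to/workspace NODE_NAME=jvm-1 TEST_TYPE=docker \
#   SUDO_PASSWORD=... ESXI_HOST=... ESXI_USER=... ESXI_PASS=... bash test.sh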
export VCOMPUTE=("${NODE_NAME}-Rinjin1" "${NODE_NAME}-Rinjin2" "${NODE_NAME}-Quanta")
MODIFY_API_PACKAGE="${MODIFY_API_PACKAGE}"
source ${WORKSPACE}/build-config/shareMethod.sh
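# Remove every Docker image on the build host (frees disk space between runs).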
cleanUpDockerImages(){
    echo $SUDO_PASSWORD |sudo -S docker rmi $(echo $SUDO_PASSWORD |sudo -S docker images -q)
}
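# Remove every Docker container, running or stopped, on the build host.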
cleanUpDockerContainer(){
    echo $SUDO_PASSWORD |sudo -S docker rm $(echo $SUDO_PASSWORD |sudo -S docker ps -a -q)
}
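# Best-effort cleanup of containers, images and host services left over from a previous run.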
cleanUp(){
    set +e
    cleanUpDockerContainer
    cleanUpDockerImages
    echo $SUDO_PASSWORD |sudo -S service mongodb stop
    echo $SUDO_PASSWORD |sudo -S service rabbitmq-server stop
    set -e
}
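# Rebuild the on-http API packages via make-deb.sh (with the steps that need a full
# Debian package build stripped out) and reinstall them into the active Python environment.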
apiPackageModify() {
    pushd ${WORKSPACE}/build-deps/on-http/extra
    sed -i "s/.*git symbolic-ref.*/ continue/g" make-deb.sh
    sed -i "/build-package.bash/d" make-deb.sh
    sed -i "/GITCOMMITDATE/d" make-deb.sh
    sed -i "/mkdir/d" make-deb.sh
    bash make-deb.sh
    popd
    for package in ${API_PACKAGE_LIST}; do
        sudo pip uninstall -y ${package//./-} || true
        pushd ${WORKSPACE}/build-deps/on-http/$package
        # Retry until the package installs cleanly; testing the command directly
        # keeps "set -e" from aborting the loop on a transient failure.
        fail=true
        while $fail; do
            if python setup.py install; then
                fail=false
            fi
        done
        popd
    done
}
VCOMPUTE="${VCOMPUTE}"
TOTAL_VCOMPUTES=3
if [ -z "${VCOMPUTE}" ]; then
VCOMPUTE=("jvm-Quanta_T41-1" "jvm-vRinjin-1" "jvm-vRinjin-2")
fi
TEST_GROUP="${TEST_GROUP}"
if [ -z "${TEST_GROUP}" ]; then
TEST_GROUP="smoke-tests"
fi
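# Power off the virtual compute nodes on the ESXi host, or switch the Sentry PDU outlet
# off when physical nodes are used (USE_VCOMPUTE=false).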
nodesOff() {
    cd ${WORKSPACE}/build-config/deployment/
    if [ "${USE_VCOMPUTE}" != "false" ]; then
        for i in "${VCOMPUTE[@]}"; do
            ./vm_control.sh "${ESXI_HOST},${ESXI_USER},${ESXI_PASS},power_off,1,${i}_*"
        done
    else
        ./telnet_sentry.exp ${SENTRY_HOST} ${SENTRY_USER} ${SENTRY_PASS} off ${OUTLET_NAME}
        sleep 5
    fi
}
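# Power on the virtual compute nodes, or switch the Sentry PDU outlet on for physical nodes.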
nodesOn() {
    cd ${WORKSPACE}/build-config/deployment/
    if [ "${USE_VCOMPUTE}" != "false" ]; then
        for i in "${VCOMPUTE[@]}"; do
            ./vm_control.sh "${ESXI_HOST},${ESXI_USER},${ESXI_PASS},power_on,1,${i}_*"
        done
    else
        ./telnet_sentry.exp ${SENTRY_HOST} ${SENTRY_USER} ${SENTRY_PASS} on ${OUTLET_NAME}
        sleep 5
    fi
}
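# Delete the virtual compute nodes from the ESXi host (plus the post-test OVA when
# OVA_POST_TEST is true). No-op for physical nodes.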
nodesDelete() {
    cd ${WORKSPACE}/build-config/deployment/
    if [ "${USE_VCOMPUTE}" != "false" ]; then
        if [ "${OVA_POST_TEST}" == "true" ]; then
            VCOMPUTE+=("${NODE_NAME}-ova-for-post-test")
        fi
        for i in "${VCOMPUTE[@]}"; do
            ./vm_control.sh "${ESXI_HOST},${ESXI_USER},${ESXI_PASS},delete,1,${i}_*"
        done
    fi
}
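# Deploy the virtual compute nodes (two vRinjin OVAs and one vQuanta OVA) to the ESXi
# host with ovftool; for physical nodes just make sure they are powered off.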
nodesCreate() {
    cd ${WORKSPACE}/build-config/deployment/
    if [ "${USE_VCOMPUTE}" != "false" ]; then
        for i in {1..2}
        do
            execWithTimeout "ovftool --overwrite --noSSLVerify --diskMode=${DISKMODE} --datastore=${DATASTORE} --name='${NODE_NAME}-Rinjin${i}' --net:'${NIC}=${NODE_NAME}-switch' '${HOME}/isofarm/OVA/vRinjin-Haswell.ova' vi://${ESXI_USER}:${ESXI_PASS}@${ESXI_HOST}"
        done
        execWithTimeout "ovftool --overwrite --noSSLVerify --diskMode=${DISKMODE} --datastore=${DATASTORE} --name='${NODE_NAME}-Quanta' --net:'${NIC}=${NODE_NAME}-switch' '${HOME}/isofarm/OVA/vQuanta-T41-Haswell.ova' vi://${ESXI_USER}:${ESXI_PASS}@${ESXI_HOST}"
    else
        nodesOff
    fi
}
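# Start VNC screen recording of the virtual nodes in the background; recordings are
# written to ${WORKSPACE}/build-log.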
vnc_record_start(){
    mkdir -p ${WORKSPACE}/build-log
    pushd ${WORKSPACE}/build-config
    export fname_prefix="vNode"
    if [ -n "${BUILD_ID}" ]; then
        export fname_prefix=${fname_prefix}_b${BUILD_ID}
    fi
    bash vnc_record.sh ${WORKSPACE}/build-log $fname_prefix &
}
vnc_record_stop(){
    # Sleep 2 seconds so the FLV recorder finishes its disk I/O before the VMs are destroyed.
    set +e
    pkill -f flvrec.py
    sleep 2
    set -e
}
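# Capture the nodes' SOL (serial-over-LAN) console output in the background.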
generateSolLog(){
    pushd ${WORKSPACE}/build-config
    bash generate-sol-log.sh > ${WORKSPACE}/sol_script.log &
}
generateSolLogStop(){
    set +e
    pkill -f SCREEN
}
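# Dump the kernel log (dmesg) from the RackHD test container into the build log directory.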
generateSysLog(){
    set +e
    containerId=$( echo $SUDO_PASSWORD |sudo -S docker ps|grep "my/test" | awk '{print $1}' )
    echo $SUDO_PASSWORD |sudo -S docker exec -it $containerId dmesg > ${WORKSPACE}/build-log/dmesg.log
}
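# Copy the MongoDB logs out of the RackHD test container.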
generateMongoLog(){
    set +e
    containerId=$( echo $SUDO_PASSWORD |sudo -S docker ps|grep "my/test" | awk '{print $1}' )
    echo $SUDO_PASSWORD |sudo -S docker cp $containerId:/var/log/mongodb ${WORKSPACE}/build-log
    echo $SUDO_PASSWORD |sudo -S chown -R $USER:$USER ${WORKSPACE}/build-log/mongodb
}
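# Copy the RackHD service logs out of the test container.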
generateRackHDLog(){
    set +e
    containerId=$( echo $SUDO_PASSWORD |sudo -S docker ps|grep "my/test" | awk '{print $1}' )
    echo $SUDO_PASSWORD |sudo -S docker cp $containerId:/var/log/rackhd.log ${WORKSPACE}/build-log
    echo $SUDO_PASSWORD |sudo -S chown -R $USER:$USER ${WORKSPACE}/build-log/logs
    mv ${WORKSPACE}/build-log/logs/*.log ${WORKSPACE}/build-log
}
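# Create and activate the Python virtualenv used by the FIT tests, then optionally
# rebuild and install the on-http API packages into it.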
setupVirtualEnv(){
    pushd ${WORKSPACE}/RackHD/test
    rm -rf .venv/on-build-config
    ./mkenv.sh on-build-config
    source myenv_on-build-config
    popd
    if [ "$MODIFY_API_PACKAGE" == true ] ; then
        apiPackageModify
    fi
}
BASE_REPO_URL="${BASE_REPO_URL}"
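# Entry point for the test run; currently just forwards any extra FIT arguments to fitSmokeTest.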
runTests() {
    set +e
    netstat -ntlp
    args=()
    if [ ! -z "$1" ];then
        args+="$1"
    fi
    fitSmokeTest "${args}"
    set -e
}
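# Poll the RackHD northbound API on localhost:9090 until it answers, giving up after
# roughly 10 minutes.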
waitForAPI() {
    netstat -ntlp
    timeout=0
    maxto=60
    set +e
    url=http://localhost:9090/api/2.0/nodes
    while [ ${timeout} != ${maxto} ]; do
        wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 -t 1 --continue ${url}
        if [ $? = 0 ]; then
            break
        fi
        sleep 10
        timeout=`expr ${timeout} + 1`
    done
    set -e
    if [ ${timeout} == ${maxto} ]; then
        echo "Timed out waiting for RackHD API service (duration=`expr $maxto \* 10`s)."
        exit 1
    fi
}
waitForNodes() {
    # Wait for the virtual nodes to be discovered; this expects exactly the number of
    # virtual compute nodes defined by TOTAL_VCOMPUTES at the top of this script.
    netstat -ntlp
    timeout=0
    maxto=60
    set +e
    url=http://localhost:9090/api/2.0/nodes
    # check whether node IDs have been created for the virtual nodes
    sleep 20
    while [ ${timeout} != ${maxto} ]; do
        echo "Current node list: "
        wget -SO- -T 1 -t 1 --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --continue ${url}
        wget -SO- -T 1 -t 1 --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --continue ${url} | grep -o "compute" | wc -l | grep -w ${TOTAL_VCOMPUTES}
        if [ $? = 0 ]; then
            break
        fi
        sleep 10
        timeout=`expr ${timeout} + 1`
    done
    set -e
    if [ ${timeout} == ${maxto} ]; then
        echo "Timed out waiting for RackHD virtual node discovery (duration=`expr $maxto \* 10`s)."
        exit 1
    fi
}
######################################
# OVA POST SMOKE TEST RELATED #
######################################
portForwarding(){
    # Forward the OVA's service ports to localhost so they match the addresses expected
    # by the vagrant/mongo config.json and the CIT/FIT test configuration.
    socat TCP4-LISTEN:9091,forever,reuseaddr,fork TCP4:$1:5672 &
    socat TCP4-LISTEN:9090,forever,reuseaddr,fork TCP4:$1:8080 &
    socat TCP4-LISTEN:9092,forever,reuseaddr,fork TCP4:$1:9080 &
    socat TCP4-LISTEN:9093,forever,reuseaddr,fork TCP4:$1:8443 &
    socat TCP4-LISTEN:2222,forever,reuseaddr,fork TCP4:$1:22 &
    socat TCP4-LISTEN:37017,forever,reuseaddr,fork TCP4:$1:27017 &
    echo "Finished ova -> localhost port forwarding"
    echo "5672->9091"
    echo "8080->9090"
    echo "9080->9092"
    echo "8443->9093"
    echo "22->2222"
    echo "27017->37017"
}
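# Gather the RackHD logs and recordings from inside the OVA via ansible and copy them
# into ${WORKSPACE}/build-log.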
fetchOVALog(){
    ansible_workspace=${WORKSPACE}/build-config/jobs/build_ova/ansible
    # fetch the rackhd logs
    pushd $ansible_workspace
    echo "ova-post-test ansible_host=$OVA_INTERNAL_IP ansible_user=$OVA_USER ansible_ssh_pass=$OVA_PASSWORD ansible_become_pass=$OVA_PASSWORD" > hosts
    ansible-playbook -i hosts main.yml --tags "after-test"
    mkdir -p ${WORKSPACE}/build-log
    for log in `ls *.log *.flv *sol.log.raw | xargs` ; do
        cp $log ${WORKSPACE}/build-log
    done
    popd
}
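# Load the pre-built RackHD Docker image, build the "my/test" test image on top of it
# and start it with host networking.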
dockerUp(){
    ifconfig
    netstat -ntlp
    pushd $WORKSPACE
    echo $SUDO_PASSWORD |sudo -S docker load -i rackhd_pipeline_docker.tar
    popd
    cp -r ${WORKSPACE}/build-deps ${WORKSPACE}/build-config/jobs/pr_gate/docker
    pushd ${WORKSPACE}/build-config/jobs/pr_gate/docker
    #cp -r ${WORKSPACE}/build-config/jobs/pr_gate/docker/* .
    echo $SUDO_PASSWORD |sudo -S docker build -t my/test .
    echo $SUDO_PASSWORD |sudo -S docker run --net=host -v /etc/localtime:/etc/localtime:ro -d -t my/test
    popd
}
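# Point the RackHD and FIT configuration at this host's DHCP address (172.31.128.x)
# and substitute the sudo credentials for the default vagrant ones.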
setupTestsConfig(){
    RACKHD_DHCP_HOST_IP=$(ifconfig | awk '/inet addr/{print substr($2,6)}' |grep 172.31.128)
    if [ -n "$RACKHD_CONFIG_FILE_URL" ]; then
        wget --tries=3 $RACKHD_CONFIG_FILE_URL -O ${WORKSPACE}/build-config/jobs/pr_gate/docker/monorail/config.json
    fi
    sed -i "s/172.31.128.1/${RACKHD_DHCP_HOST_IP}/g" ${WORKSPACE}/build-config/jobs/pr_gate/docker/monorail/config.json
    pushd ${WORKSPACE}/RackHD/test/config
    sed -i "s/\"username\": \"vagrant\"/\"username\": \"${SUDO_USER}\"/g" credentials_default.json
    sed -i "s/\"password\": \"vagrant\"/\"password\": \"$SUDO_PASSWORD\"/g" credentials_default.json
    popd
    pushd ${WORKSPACE}/RackHD
    find ./ -type f -exec sed -i -e "s/172.31.128.1/${RACKHD_DHCP_HOST_IP}/g" {} \;
    popd
}
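# Copy the xunit XML results to ${WORKSPACE}/xunit-reports where Jenkins picks them up.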
collectTestReport()
{
    pushd ${WORKSPACE}/RackHD/test
    mkdir -p ${WORKSPACE}/xunit-reports
    cp *.xml ${WORKSPACE}/xunit-reports
    popd
}
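# Run the FIT stack-init deployment followed by the selected TEST_GROUP, collecting the
# test reports and exiting non-zero on any failure.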
fitSmokeTest()
{
    set +e
    echo "########### Run FIT Stack Init #############"
    pushd ${WORKSPACE}/RackHD/test
    #TODO Parameterize FIT args
    tstack="${TEST_STACK}"
    args=()
    if [ ! -z "$1" ];then
        args+="$1"
    fi
    waitForNodes
    python run_tests.py -test deploy/rackhd_stack_init.py ${tstack} ${args} -xunit
    if [ $? -ne 0 ]; then
        echo "FIT failed running deploy/rackhd_stack_init.py"
        collectTestReport
        exit 1
    fi
    echo "########### Run FIT Smoke Test #############"
    python run_tests.py ${TEST_GROUP} ${tstack} ${args} -v 4 -xunit
    if [ $? -ne 0 ]; then
        echo "FIT smoke test failed"
        collectTestReport
        exit 1
    fi
    collectTestReport
    popd
    set -e
}
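# Stop the recorders and gather all logs into ${WORKSPACE}/build-log; registered as the
# exit/signal handler for the docker-based flows.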
exportLog(){
    set +e
    mkdir -p ${WORKSPACE}/build-log
    vnc_record_stop
    generateSolLogStop
    generateRackHDLog
    generateMongoLog
    echo $SUDO_PASSWORD| sudo -S chown -R $USER:$USER ${WORKSPACE}
    set -e
}
######################################
# OVA POST SMOKE TEST RELATED END #
######################################
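# Main dispatch on TEST_TYPE: "ova" runs the smoke test against a deployed OVA, "docker"
# runs it against RackHD already reachable on this host, and the default path cleans up,
# builds and starts the RackHD docker containers locally before testing.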
if [ "$TEST_TYPE" == "ova" ]; then
# based on the assumption that in the same folder, the VMs has been exist normally. so don't destroy VM here.
nodesCreate
# Prepare RackHD
# Forward local host port to ova
portForwarding ${OVA_INTERNAL_IP}
# We setup the virtual-environment here, since once we
# call "nodesOn", it's a race to get to the first test
# before the nodes get booted far enough to start being
# seen by RackHD. Logically, it would be better IN runTests.
# We do it between the vagrant and waitForAPI to use the
# time to make the env instead of doing sleeps...
setupVirtualEnv
waitForAPI
nodesOn &
# signal handler
trap "deactivate && fetchOVALog" SIGINT SIGTERM SIGKILL EXIT
# Run tests
runTests
# Clean Up below
#shutdown vagrant box and delete all resource (like removing vm disk files in "~/VirtualBox VMs/")
#cleanupVMs
#nodesDelete
elif [ "$TEST_TYPE" == "docker" ]; then
# based on the assumption that in the same folder, the VMs has been exist normally. so don't destroy VM here.
nodesCreate
# Prepare RackHD
# Forward local host port to ova
portForwarding localhost
trap exportLog SIGINT SIGTERM SIGKILL EXIT
# We setup the virtual-environment here, since once we
# call "nodesOn", it's a race to get to the first test
# before the nodes get booted far enough to start being
# seen by RackHD. Logically, it would be better IN runTests.
# We do it between the vagrant and waitForAPI to use the
# time to make the env instead of doing sleeps...
setupVirtualEnv
waitForAPI
nodesOn &
generateSolLog
vnc_record_start
# Run tests
runTests
# exit venv
deactivate
# Clean Up below
#shutdown vagrant box and delete all resource (like removing vm disk files in "~/VirtualBox VMs/")
#cleanupVMs
#nodesDelete
else
    cleanUp
    # register the signal handler to export logs
    trap exportLog SIGINT SIGTERM EXIT
    nodesCreate
    setupTestsConfig
    dockerUp
    # Set up the virtual environment
    setupVirtualEnv
    waitForAPI
    nodesOn
    generateSolLog
    vnc_record_start
    # Run tests
    runTests " --sm-amqp-use-user guest"
fi