boilerplate.yml
---
# Note: this playbook relies on the `xps` variable. The `xps` variable is
# a list containing the names of all experiments. For instance, for the
# idle experiment the list is the following:
# - idle-cpt20-nfk05
# - idle-cpt20-nfk10
# - idle-cpt20-nfk25
# - idle-cpt20-nfk50
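#
# As an illustration only (not part of this playbook), `xps` could be passed
# as extra vars on the command line, e.g.:
#   ansible-playbook -i <inventory> boilerplate.yml \
#     -e '{"xps": ["idle-cpt20-nfk05", "idle-cpt20-nfk10", "idle-cpt20-nfk25", "idle-cpt20-nfk50"]}'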
- hosts: all
tasks:
- name: Displaying the list of experiments
debug: var=xps
- name: Making the results and archives directories
file: path=/results/archives state=directory
- name: Copying result archives from /home into /results/archives
command: "cp /home/{{ item }}.tar.gz /results/archives/"
args:
creates: "/results/archives/{{ item }}.tar.gz"
with_items: "{{ xps }}"
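# The two extraction steps below rely on the layout produced above:
# /results/archives/<xp>.tar.gz unpacks into /results/<xp>/, which itself
# contains further *.tar.gz archives.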
- name: Extracting experiment results
command: "tar -C /results -xzf /results/archives/{{ item }}.tar.gz"
args:
creates: "/results/{{ item }}"
with_items: "{{ xps }}"
- name: Extracting the nested archives
shell: "for i in $(ls /results/{{ item }}/*.tar.gz); do tar -C /results/{{ item }} -xzf $i; done"
with_items: "{{ xps }}"
# Workaround for https://github.com/BeyondTheClouds/kolla-g5k/issues/44 for older results.
# If the _data directory exists, its logs overwrite the ones already in place.
- name: Unnesting directory structure (workaround https://github.com/BeyondTheClouds/kolla-g5k/issues/44)
shell: "cd /results/{{ item }}/tmp/kolla-logs; [ -d _data ] && cp -r _data/* . || echo 0"
with_items: "{{ xps }}"
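# Note on the task below: for each archive, `tar -tvzf` lists its members,
# the grep/awk pair prints the archive name only when its haproxy.log entry
# has a size greater than zero, and xargs then extracts only those archives.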
- name: Extracting haproxy logs from the archives that contain them
shell: "cd /results/{{ item }}; for i in $(ls *.tar.gz); do tar -tvzf $i | grep haproxy.log | awk -v x=$i '$3 > 0 {print x}'; done | xargs -n 1 tar -xvzf"
with_items: "{{ xps }}"
- name: Fixing permissions
command: chmod -R 755 /results
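# One InfluxDB container is started per experiment; the published ports are
# offset by the experiment index (item.0): index 0 maps 8083/18086, index 1
# maps 8183/18186, and so on. The same 18086 + index * 100 offset is reused
# below when registering the Grafana data sources.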
- name: Starting influx container
docker:
name: "influx_{{ item.1 }}"
detach: yes
image: "tutum/influxdb:0.13"
state: started
restart_policy: always
ports:
- "{{ 8083 + item.0 * 100 | int }}:8083"
- "{{ 18086 + item.0 * 100 | int }}:8086"
volumes: "/results/{{ item.1 }}/influx-data:/data"
with_indexed_items: "{{ xps }}"
- name: Removing previous grafana container
docker:
image: "grafana/grafana:3.1.0"
name: "grafana"
state: absent
- name: Starting grafana container
docker:
detach: yes
image: "grafana/grafana:3.1.0"
name: "grafana"
ports: "3000:3000"
state: started
restart_policy: always
- name: Waiting for the grafana service to become available
wait_for:
# Use docker's bridge interface rather than localhost:
# the API answers on loopback too early, before the
# service is actually ready.
host: "172.17.0.1"
port: 3000
state: started
delay: 2
timeout: 120
- name: Installing the httplib2 library (required by the uri module)
pip: name=httplib2
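# The two tasks below register one InfluxDB data source per experiment
# through the Grafana HTTP API. As a rough sketch (assuming the admin/admin
# credentials used here), each call amounts to:
#   curl -u admin:admin -H 'Content-Type: application/json' \
#     -X POST http://localhost:3000/api/datasources \
#     -d '{"name": "...", "type": "influxdb", "url": "...", "access": "proxy", ...}'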
- name: Add the influx cadvisor data source
uri:
url: "http://localhost:3000/api/datasources"
user: admin
password: admin
force_basic_auth: yes
body_format: json
HEADER_Content-Type: application/json
method: POST
# Workaround for
# https://github.com/ansible/ansible-modules-core/issues/265:
# a leading space is added before the JSON body.
body: " { \"name\": \"cadvisor-{{ item.1 }}\", \"type\": \"influxdb\", \"url\": \"http://172.17.0.1:{{ 18086 + item.0 * 100 }}\", \"access\": \"proxy\", \"database\": \"cadvisor\", \"user\": \"root\", \"password\": \"root\", \"isDefault\": true }"
status_code: 200,500
with_indexed_items: "{{ xps }}"
- name: Add the influx collectd data source
uri:
url: "http://localhost:3000/api/datasources"
user: admin
password: admin
force_basic_auth: yes
body_format: json
HEADER_Content-Type: application/json
method: POST
# Same workaround as above for
# https://github.com/ansible/ansible-modules-core/issues/265:
# a leading space is added before the JSON body.
body: " { \"name\": \"collectd-{{ item.1 }}\", \"type\": \"influxdb\", \"url\": \"http://172.17.0.1:{{ 18086 + item.0 * 100 }}\", \"access\": \"proxy\", \"database\": \"collectd\", \"user\": \"root\", \"password\": \"root\", \"isDefault\": true }"
return_content: yes
status_code: 200
with_indexed_items: "{{ xps }}"
- name: Copying the nginx configuration file
copy: src="files/nginx.conf" dest="/nginx.conf"
- name: Removing previous nginx server container
docker:
image: "nginx:alpine"
name: "nginx"
state: absent
- name: Starting the nginx server container
docker:
detach: yes
image: "nginx:alpine"
name: "nginx"
ports: "80:80"
restart_policy: always
volumes:
- "/results:/usr/share/nginx/html"
- "/nginx.conf:/etc/nginx/nginx.conf"
state: started
- name: Add elasticsearch
docker:
detach: yes
image: elasticsearch
name: elasticsearch
ports:
- "9200:9200"
- "9300:9300"
restart_policy: always
# https://www.elastic.co/guide/en/elasticsearch/reference/5.0/_maximum_map_count_check.html
- name: Set the maximum map count
sysctl:
name: vm.max_map_count
value: 262144
state: present
- name: Waiting for elasticsearch to become available
wait_for:
# Use docker's bridge interface rather than localhost:
# the API answers on loopback too early, before the
# service is actually ready.
host: "172.17.0.1"
port: 9200
state: started
delay: 2
timeout: 120
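# The next four tasks rebuild the heka index from scratch: check whether it
# already exists, delete it if it does, create it again, and finally set the
# mapping of a few fields (see mapping.json).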
# The Type field is used to differentiate experiments; leaving it
# unanalysed makes the analysis through Kibana easier.
- name: Check if heka index exists
uri:
url: "http://localhost:9200/heka"
method: GET
body_format: json
status_code: 200,404
register: index
- debug: var=index
- name: Deleting old index
uri:
url: "http://localhost:9200/heka"
method: DELETE
body_format: json
when: index.status == 200
- name: Create the heka index in elasticsearch
uri:
url: "http://localhost:9200/heka"
method: PUT
# Ansible 1.9.x does not seem able to load a JSON file
# and use it properly as the body of the request,
# e.g., with Ansible 2 one could write:
# body: "{{ lookup('file', 'files/mapping.json') | from_json | to_json}}"
# body_format: json
- name: Do not analyse some fields in elasticsearch (see mapping.json)
uri:
url: "http://localhost:9200/heka/_mapping/message"
method: PUT
body: " { \"properties\": {\"{{ item }}\": {\"type\":\"string\", \"index\": \"not_analyzed\"}}}"
body_format: json
with_items: ["Type", "request_id", "tenant_id", "user_id", "programname"]
- name: Add kibana
docker:
detach: yes
image: kibana
name: kibana
links:
- "elasticsearch:elasticsearch"
ports:
- "5601:5601"
restart_policy: always
- name: Create heka configuration directory
file: path=/etc/heka state=directory
- name: Copying heka config files
copy: src=files/heka/ dest=/etc/heka/{{ item }}
with_items: "{{ xps }}"
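# The nested loop below renders one template per (service, experiment) pair;
# for example, templates/heka-elasticsearch.toml.j2 is rendered to
# /etc/heka/idle-cpt20-nfk05/heka-elasticsearch.toml for the first idle experiment.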
- name: Generating heka-specific configuration
template: src=templates/heka-{{ item[0] }}.toml.j2 dest=/etc/heka/{{ item[1] }}/heka-{{ item[0] }}.toml
with_nested:
- ["elasticsearch", "openstack", "keystone", "mariadb", "rabbitmq"]
- "{{ xps }}"
- name: Add kolla/heka
docker:
command: kolla_start
detach: yes
image: kolla/centos-binary-heka:2.0.2
name: "heka-{{ item }}"
links:
- "elasticsearch:elasticsearch"
restart_policy: always
volumes:
- "/etc/heka/{{ item }}:/var/lib/kolla/config_files"
- "/etc/localtime:/etc/localtime:ro"
- "/results/{{ item }}/tmp/kolla-logs:/var/log/kolla"
- "heka-{{ item }}:/var/cache/hekad"
- "heka_socket-{{ item }}:/var/lib/kolla/heka"
# patch to support custom types
- "/etc/heka/{{ item }}/lua_decoders/os_openstack_log.lua:/usr/share/heka/lua_decoders/os_openstack_log.lua"
- "/etc/heka/{{ item }}/lua_decoders/os_keystone_apache_log.lua:/usr/share/heka/lua_decoders/os_keystone_apache_log.lua"
- "/etc/heka/{{ item }}/lua_decoders/os_mysql_log.lua:/usr/share/heka/lua_decoders/os_mysql_log.lua"
- "/etc/heka/{{ item }}/lua_decoders/os_rabbitmq_log.lua:/usr/share/heka/lua_decoders/os_rabbitmq_log.lua"
env:
SKIP_LOG_SETUP: true
KOLLA_CONFIG_STRATEGY: COPY_ALWAYS
with_items: "{{ xps }}"