diff --git a/CNAME b/CNAME new file mode 100644 index 0000000..e847d8f --- /dev/null +++ b/CNAME @@ -0,0 +1 @@ +sgoettschkes.me diff --git a/README.md b/README.md new file mode 100644 index 0000000..e553f82 --- /dev/null +++ b/README.md @@ -0,0 +1,24 @@ +Sgoettschkes/sgoettschkes.github.io +=================================== + +sgoettschkes.me goes [still](https://github.com/still-ex/still) + +## Development + +Install the dependencies using `mix deps.get` for Elixir and `npm install --prefix priv/site/assets` for Javascript. + +Run the dev server using `mix still.dev` and see the result at http://localhost:3000/ + +### Update dependencies + +To update mix dependencies and see the ones which can't be updated automatically, run `mix deps.update --all && mix hex.outdated`. + +To update npm dependencies and see the ones which can't be updated automatically, run `npm update --prefix priv/site/assets --all && npm outdated --prefix priv/site/assets` + +## Production + +Generate the html using `mix still.compile`. The final page is published into the `_site` folder. + +### Publishing + +To publish the generate page (see above) to Github Pages, the content from `_site` has to be pushed to the `main` branch. The command `mix publish` will take care of this. It's run within the Github Actions workflow. diff --git a/about.html b/about.html new file mode 100644 index 0000000..de66436 --- /dev/null +++ b/about.html @@ -0,0 +1,409 @@ + + + + + About; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +

Sebastian Göttschkes

+

+ + +

Contact

+ + + +

Projects

+ +
+ +
Sgoettschkes.me
+
+ Personal blog, about and /now page. + +
+ sgoettschkes.me + +
+ +
+ +

Talks

+ + + + + +

Workshops

+ + + +

Publications

+ + + +

Contributions

+ + + +
+ + + + diff --git a/css/app.css b/css/app.css new file mode 100644 index 0000000..8bf1b9b --- /dev/null +++ b/css/app.css @@ -0,0 +1,879 @@ +/* +! tailwindcss v3.4.4 | MIT License | https://tailwindcss.com +*/ + +/* +1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4) +2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116) +*/ + +*, +::before, +::after { + box-sizing: border-box; + /* 1 */ + border-width: 0; + /* 2 */ + border-style: solid; + /* 2 */ + border-color: #e5e7eb; + /* 2 */ +} + +::before, +::after { + --tw-content: ''; +} + +/* +1. Use a consistent sensible line-height in all browsers. +2. Prevent adjustments of font size after orientation changes in iOS. +3. Use a more readable tab size. +4. Use the user's configured `sans` font-family by default. +5. Use the user's configured `sans` font-feature-settings by default. +6. Use the user's configured `sans` font-variation-settings by default. +7. Disable tap highlights on iOS +*/ + +html, +:host { + line-height: 1.5; + /* 1 */ + -webkit-text-size-adjust: 100%; + /* 2 */ + -moz-tab-size: 4; + /* 3 */ + -o-tab-size: 4; + tab-size: 4; + /* 3 */ + font-family: Open Sans, sans-serif; + /* 4 */ + font-feature-settings: normal; + /* 5 */ + font-variation-settings: normal; + /* 6 */ + -webkit-tap-highlight-color: transparent; + /* 7 */ +} + +/* +1. Remove the margin in all browsers. +2. Inherit line-height from `html` so users can set them as a class directly on the `html` element. +*/ + +body { + margin: 0; + /* 1 */ + line-height: inherit; + /* 2 */ +} + +/* +1. Add the correct height in Firefox. +2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655) +3. Ensure horizontal rules are visible by default. 
+*/ + +hr { + height: 0; + /* 1 */ + color: inherit; + /* 2 */ + border-top-width: 1px; + /* 3 */ +} + +/* +Add the correct text decoration in Chrome, Edge, and Safari. +*/ + +abbr:where([title]) { + -webkit-text-decoration: underline dotted; + text-decoration: underline dotted; +} + +/* +Remove the default font size and weight for headings. +*/ + +h1, +h2, +h3, +h4, +h5, +h6 { + font-size: inherit; + font-weight: inherit; +} + +/* +Reset links to optimize for opt-in styling instead of opt-out. +*/ + +a { + color: inherit; + text-decoration: inherit; +} + +/* +Add the correct font weight in Edge and Safari. +*/ + +b, +strong { + font-weight: bolder; +} + +/* +1. Use the user's configured `mono` font-family by default. +2. Use the user's configured `mono` font-feature-settings by default. +3. Use the user's configured `mono` font-variation-settings by default. +4. Correct the odd `em` font sizing in all browsers. +*/ + +code, +kbd, +samp, +pre { + font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + /* 1 */ + font-feature-settings: normal; + /* 2 */ + font-variation-settings: normal; + /* 3 */ + font-size: 1em; + /* 4 */ +} + +/* +Add the correct font size in all browsers. +*/ + +small { + font-size: 80%; +} + +/* +Prevent `sub` and `sup` elements from affecting the line height in all browsers. +*/ + +sub, +sup { + font-size: 75%; + line-height: 0; + position: relative; + vertical-align: baseline; +} + +sub { + bottom: -0.25em; +} + +sup { + top: -0.5em; +} + +/* +1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297) +2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016) +3. Remove gaps between table borders by default. 
+*/ + +table { + text-indent: 0; + /* 1 */ + border-color: inherit; + /* 2 */ + border-collapse: collapse; + /* 3 */ +} + +/* +1. Change the font styles in all browsers. +2. Remove the margin in Firefox and Safari. +3. Remove default padding in all browsers. +*/ + +button, +input, +optgroup, +select, +textarea { + font-family: inherit; + /* 1 */ + font-feature-settings: inherit; + /* 1 */ + font-variation-settings: inherit; + /* 1 */ + font-size: 100%; + /* 1 */ + font-weight: inherit; + /* 1 */ + line-height: inherit; + /* 1 */ + letter-spacing: inherit; + /* 1 */ + color: inherit; + /* 1 */ + margin: 0; + /* 2 */ + padding: 0; + /* 3 */ +} + +/* +Remove the inheritance of text transform in Edge and Firefox. +*/ + +button, +select { + text-transform: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Remove default button styles. +*/ + +button, +input:where([type='button']), +input:where([type='reset']), +input:where([type='submit']) { + -webkit-appearance: button; + /* 1 */ + background-color: transparent; + /* 2 */ + background-image: none; + /* 2 */ +} + +/* +Use the modern Firefox focus style for all focusable elements. +*/ + +:-moz-focusring { + outline: auto; +} + +/* +Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737) +*/ + +:-moz-ui-invalid { + box-shadow: none; +} + +/* +Add the correct vertical alignment in Chrome and Firefox. +*/ + +progress { + vertical-align: baseline; +} + +/* +Correct the cursor style of increment and decrement buttons in Safari. +*/ + +::-webkit-inner-spin-button, +::-webkit-outer-spin-button { + height: auto; +} + +/* +1. Correct the odd appearance in Chrome and Safari. +2. Correct the outline style in Safari. 
+*/ + +[type='search'] { + -webkit-appearance: textfield; + /* 1 */ + outline-offset: -2px; + /* 2 */ +} + +/* +Remove the inner padding in Chrome and Safari on macOS. +*/ + +::-webkit-search-decoration { + -webkit-appearance: none; +} + +/* +1. Correct the inability to style clickable types in iOS and Safari. +2. Change font properties to `inherit` in Safari. +*/ + +::-webkit-file-upload-button { + -webkit-appearance: button; + /* 1 */ + font: inherit; + /* 2 */ +} + +/* +Add the correct display in Chrome and Safari. +*/ + +summary { + display: list-item; +} + +/* +Removes the default spacing and border for appropriate elements. +*/ + +blockquote, +dl, +dd, +h1, +h2, +h3, +h4, +h5, +h6, +hr, +figure, +p, +pre { + margin: 0; +} + +fieldset { + margin: 0; + padding: 0; +} + +legend { + padding: 0; +} + +ol, +ul, +menu { + list-style: none; + margin: 0; + padding: 0; +} + +/* +Reset default styling for dialogs. +*/ + +dialog { + padding: 0; +} + +/* +Prevent resizing textareas horizontally by default. +*/ + +textarea { + resize: vertical; +} + +/* +1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300) +2. Set the default placeholder color to the user's configured gray 400 color. +*/ + +input::-moz-placeholder, textarea::-moz-placeholder { + opacity: 1; + /* 1 */ + color: #9ca3af; + /* 2 */ +} + +input::placeholder, +textarea::placeholder { + opacity: 1; + /* 1 */ + color: #9ca3af; + /* 2 */ +} + +/* +Set the default cursor for buttons. +*/ + +button, +[role="button"] { + cursor: pointer; +} + +/* +Make sure disabled buttons don't get the pointer cursor. +*/ + +:disabled { + cursor: default; +} + +/* +1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14) +2. Add `vertical-align: middle` to align replaced elements more sensibly by default. 
(https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210) + This can trigger a poorly considered lint error in some tools but is included by design. +*/ + +img, +svg, +video, +canvas, +audio, +iframe, +embed, +object { + display: block; + /* 1 */ + vertical-align: middle; + /* 2 */ +} + +/* +Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14) +*/ + +img, +video { + max-width: 100%; + height: auto; +} + +/* Make elements with the HTML hidden attribute stay hidden by default */ + +[hidden] { + display: none; +} + +main a { + --tw-text-opacity: 1; + color: rgb(0 140 186 / var(--tw-text-opacity)); +} + +article h2 { + margin-top: 0.25rem; + margin-bottom: 0.5rem; + font-family: Lora, serif; + font-size: 1.5rem; + line-height: 2rem; + font-weight: 700; +} + +article p { + margin-bottom: 1.25rem; + font-family: Open Sans, sans-serif; +} + +pre { + margin-top: 1.25rem; + margin-bottom: 1.25rem; + --tw-bg-opacity: 1; + background-color: rgb(248 250 252 / var(--tw-bg-opacity)); + padding: 0.25rem; +} + +code { + --tw-text-opacity: 1; + color: rgb(185 28 28 / var(--tw-text-opacity)); +} + +*, ::before, ::after { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; + --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-pan-x: ; + --tw-pan-y: ; + --tw-pinch-zoom: ; + --tw-scroll-snap-strictness: proximity; + --tw-gradient-from-position: ; + --tw-gradient-via-position: ; + --tw-gradient-to-position: ; + --tw-ordinal: ; + --tw-slashed-zero: ; + --tw-numeric-figure: ; + --tw-numeric-spacing: ; + --tw-numeric-fraction: ; + --tw-ring-inset: ; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(59 130 246 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; + --tw-blur: ; 
+ --tw-brightness: ; + --tw-contrast: ; + --tw-grayscale: ; + --tw-hue-rotate: ; + --tw-invert: ; + --tw-saturate: ; + --tw-sepia: ; + --tw-drop-shadow: ; + --tw-backdrop-blur: ; + --tw-backdrop-brightness: ; + --tw-backdrop-contrast: ; + --tw-backdrop-grayscale: ; + --tw-backdrop-hue-rotate: ; + --tw-backdrop-invert: ; + --tw-backdrop-opacity: ; + --tw-backdrop-saturate: ; + --tw-backdrop-sepia: ; + --tw-contain-size: ; + --tw-contain-layout: ; + --tw-contain-paint: ; + --tw-contain-style: ; +} + +::backdrop { + --tw-border-spacing-x: 0; + --tw-border-spacing-y: 0; + --tw-translate-x: 0; + --tw-translate-y: 0; + --tw-rotate: 0; + --tw-skew-x: 0; + --tw-skew-y: 0; + --tw-scale-x: 1; + --tw-scale-y: 1; + --tw-pan-x: ; + --tw-pan-y: ; + --tw-pinch-zoom: ; + --tw-scroll-snap-strictness: proximity; + --tw-gradient-from-position: ; + --tw-gradient-via-position: ; + --tw-gradient-to-position: ; + --tw-ordinal: ; + --tw-slashed-zero: ; + --tw-numeric-figure: ; + --tw-numeric-spacing: ; + --tw-numeric-fraction: ; + --tw-ring-inset: ; + --tw-ring-offset-width: 0px; + --tw-ring-offset-color: #fff; + --tw-ring-color: rgb(59 130 246 / 0.5); + --tw-ring-offset-shadow: 0 0 #0000; + --tw-ring-shadow: 0 0 #0000; + --tw-shadow: 0 0 #0000; + --tw-shadow-colored: 0 0 #0000; + --tw-blur: ; + --tw-brightness: ; + --tw-contrast: ; + --tw-grayscale: ; + --tw-hue-rotate: ; + --tw-invert: ; + --tw-saturate: ; + --tw-sepia: ; + --tw-drop-shadow: ; + --tw-backdrop-blur: ; + --tw-backdrop-brightness: ; + --tw-backdrop-contrast: ; + --tw-backdrop-grayscale: ; + --tw-backdrop-hue-rotate: ; + --tw-backdrop-invert: ; + --tw-backdrop-opacity: ; + --tw-backdrop-saturate: ; + --tw-backdrop-sepia: ; + --tw-contain-size: ; + --tw-contain-layout: ; + --tw-contain-paint: ; + --tw-contain-style: ; +} + +.m-0 { + margin: 0px; +} + +.mx-1 { + margin-left: 0.25rem; + margin-right: 0.25rem; +} + +.mx-auto { + margin-left: auto; + margin-right: auto; +} + +.my-10 { + margin-top: 2.5rem; + margin-bottom: 
2.5rem; +} + +.mb-2 { + margin-bottom: 0.5rem; +} + +.mb-4 { + margin-bottom: 1rem; +} + +.mb-5 { + margin-bottom: 1.25rem; +} + +.mb-8 { + margin-bottom: 2rem; +} + +.mb-auto { + margin-bottom: auto; +} + +.mt-2 { + margin-top: 0.5rem; +} + +.mt-5 { + margin-top: 1.25rem; +} + +.inline-block { + display: inline-block; +} + +.inline { + display: inline; +} + +.flex { + display: flex; +} + +.grid { + display: grid; +} + +.h-2\/6 { + height: 33.333333%; +} + +.h-fit { + height: -moz-fit-content; + height: fit-content; +} + +.h-full { + height: 100%; +} + +.max-h-\[33\%\] { + max-height: 33%; +} + +.min-h-\[33\%\] { + min-height: 33%; +} + +.w-3\/5 { + width: 60%; +} + +.w-fit { + width: -moz-fit-content; + width: fit-content; +} + +.w-full { + width: 100%; +} + +.flex-none { + flex: none; +} + +.flex-grow { + flex-grow: 1; +} + +.list-inside { + list-style-position: inside; +} + +.list-disc { + list-style-type: disc; +} + +.grid-cols-1 { + grid-template-columns: repeat(1, minmax(0, 1fr)); +} + +.flex-row { + flex-direction: row; +} + +.flex-col { + flex-direction: column; +} + +.items-center { + align-items: center; +} + +.justify-items-center { + justify-items: center; +} + +.border { + border-width: 1px; +} + +.border-b-2 { + border-bottom-width: 2px; +} + +.border-t { + border-top-width: 1px; +} + +.border-slate-100 { + --tw-border-opacity: 1; + border-color: rgb(241 245 249 / var(--tw-border-opacity)); +} + +.border-slate-200 { + --tw-border-opacity: 1; + border-color: rgb(226 232 240 / var(--tw-border-opacity)); +} + +.border-slate-500 { + --tw-border-opacity: 1; + border-color: rgb(100 116 139 / var(--tw-border-opacity)); +} + +.bg-white { + --tw-bg-opacity: 1; + background-color: rgb(255 255 255 / var(--tw-bg-opacity)); +} + +.bg-gradient-to-b { + background-image: linear-gradient(to bottom, var(--tw-gradient-stops)); +} + +.from-slate-200 { + --tw-gradient-from: #e2e8f0 var(--tw-gradient-from-position); + --tw-gradient-to: rgb(226 232 240 / 0) 
var(--tw-gradient-to-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); +} + +.from-slate-600 { + --tw-gradient-from: #475569 var(--tw-gradient-from-position); + --tw-gradient-to: rgb(71 85 105 / 0) var(--tw-gradient-to-position); + --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to); +} + +.to-slate-300 { + --tw-gradient-to: #cbd5e1 var(--tw-gradient-to-position); +} + +.to-slate-800 { + --tw-gradient-to: #1e293b var(--tw-gradient-to-position); +} + +.p-3 { + padding: 0.75rem; +} + +.px-3 { + padding-left: 0.75rem; + padding-right: 0.75rem; +} + +.px-4 { + padding-left: 1rem; + padding-right: 1rem; +} + +.pb-4 { + padding-bottom: 1rem; +} + +.pr-0 { + padding-right: 0px; +} + +.pr-12 { + padding-right: 3rem; +} + +.pt-2 { + padding-top: 0.5rem; +} + +.pt-4 { + padding-top: 1rem; +} + +.text-center { + text-align: center; +} + +.font-sans { + font-family: Open Sans, sans-serif; +} + +.font-serif { + font-family: Lora, serif; +} + +.text-4xl { + font-size: 2.25rem; + line-height: 2.5rem; +} + +.text-lg { + font-size: 1.125rem; + line-height: 1.75rem; +} + +.text-sm { + font-size: 0.875rem; + line-height: 1.25rem; +} + +.text-xl { + font-size: 1.25rem; + line-height: 1.75rem; +} + +.text-xs { + font-size: 0.75rem; + line-height: 1rem; +} + +.font-bold { + font-weight: 700; +} + +.capitalize { + text-transform: capitalize; +} + +.leading-5 { + line-height: 1.25rem; +} + +.text-black { + --tw-text-opacity: 1; + color: rgb(0 0 0 / var(--tw-text-opacity)); +} + +.text-blue { + --tw-text-opacity: 1; + color: rgb(0 140 186 / var(--tw-text-opacity)); +} + +.text-gray-400 { + --tw-text-opacity: 1; + color: rgb(156 163 175 / var(--tw-text-opacity)); +} + +.text-white { + --tw-text-opacity: 1; + color: rgb(255 255 255 / var(--tw-text-opacity)); +} diff --git a/feed.xml b/feed.xml new file mode 100644 index 0000000..af93c2d --- /dev/null +++ b/feed.xml @@ -0,0 +1,224 @@ + + + + <![CDATA[ Sgoettschkes.me ]]> + + 
https://sgoettschkes.me/ + Still + Mon, 01 Jul 2024 13:00:43 +0000 + + + 60 + + + <![CDATA[ Building OneSen in public ]]> + A unique visual representation of the 'OneSen' note-taking app

It’s been a while since I posted on my blog. Almost two years, to be precise. I was focusing my energy on other parts of my life, and, to be honest, I never got back into publishing regular blog posts after falling off the wagon in 2017. But now I am back, and I intend to publish more often.

Building OneSen

Last week, I started a new side project. It’s called OneSen, and currently, it’s a text field you can use to store your daily notes. In the future, I envision it to be a note-taking app focusing on daily notes (much like micro-blogging) and simplicity. I also have ideas to add the capabilities of LLMs to enrich the experience and get insights into your notes. The alpha version, without a design, text, or explanations, can be found at https://onesen.app. But it works 😉

Build in public

My current plan is to build this app in public. I like the idea of (radical) transparency, which is present when being open about everything happening. I like to teach others what works for me and what doesn’t. I like to inspire others to build something on their own, make their work more public, or share more. We can all benefit from the experiences others have already had.

The idea

To be transparent, I have to give credit where credit is due. The idea for OneSen came from Dainius, who mentioned that he’d like a straightforward interface to take notes and have them stored daily, much like a physical notebook with its pages dedicated to a specific date.

I let the idea sit, and because I thought about it more over time, I decided to act on it. I kept the notebook analogy since the concept closely relates to physical notebooks. Currently, each notebook contains pages, one page for each day. A user can only change the current day, and the system saves changes automatically when the text changes. It’s as easy as possible. Go to your notebook, write something, and leave.

The MVP

My current plan for the MVP is to add a design, make it possible to view past dates and rename notebooks. Those changes should be enough to make it usable and see if people want something like this.

The long term vision

OneSen could go in many directions. One idea I had was the “One second a day” video app for writing. Write something daily and combine it per week, month, or year.

Another idea was to add LLM capabilities, analyze notes, and gather insights using AI. This feature could be interesting for people writing morning pages or journals.

I am also thinking about encrypting notes on the client, adding security that almost no other note-taking app has. The drawback would be that working with notes would need to happen on the client since even searching would not be possible with encrypted notes.

]]>
+ Sun, 01 Jan 2023 16:45:00 +0000 + https://sgoettschkes.me/p/building-onesen-in-public.html + https://sgoettschkes.me/p/building-onesen-in-public.html + + +
+ + + <![CDATA[ Phoenix UI testing with Cypress, Part 1 ]]> + I still remember the days I tried to achieve UI testing with Selenium, PhantomJS and various other tools. It was a hassle. It didn’t run on CI because it needed some kind of window manager. It was unstable.

Introducing Cypress

Cypress has solved these issues, hiding the complexity of UI testing and leaving you with the task of writing the tests. It comes with its own UI where you can run tests and see what the test does in real time. Cypress can do screenshots, videos of the tests and more.

In this tutorial, we’ll mostly use cypress run. This command acts like mix test does: Running your tests and displaying the result within your terminal window. I encourage you to check out the other features of Cypress on your own.

Cypress Setup

I assume you have an existing Elixir/Phoenix project you want to start using Cypress with. Your frontend files are located at assets/ while your elixir tests are stored within test/.

To install Cypress, we use npm: cd assets/ && npm install cypress --save-dev. Cypress installs itself and is afterwards available as a command line tool at assets/node_modules/cypress/bin/cypress.

Cypress by default expects all test files and support files to be located at cypress/. I’d argue a much better place for these files is in test/cypress, so this is where we’re going to place them. If you like your Cypress tests to live someplace else, you’ll find this guide helpful to figure out which adjustments you need to make.

Config and support files

Let’s start with the config file cypress.json in your root directory:

{
+  "componentFolder": false,
+  "downloadsFolder": "tmp/cypress/downloads",
+  "fixturesFolder": "test/cypress/fixtures",
+  "integrationFolder": "test/cypress/integration",
+  "pluginsFile": false,
+  "screenshotOnRunFailure": false,
+  "screenshotsFolder": "tmp/cypress/screenshots",
+  "supportFile": false,
+  "testFiles": "**/*.*",
+  "video": false,
+  "videosFolder": "tmp/cypress/videos"
+}

As you can see, we overwrite all folders, either pointing to test/cypress or tmp/cypress (for files to be ignored). We also don’t use support files or plugins and deactivate screenshots and videos.

The first test

Now it’s time to write the first test, a simple request to our homepage. Tests for Cypress are placed in the integrations folder which means creating the file test/cypress/integration/index_spec.js:

describe('Homepage', () => {
+  it('Visit homepage without interaction', () => {
+    cy.visit('http://localhost:4000/')
+  })
+})

You can run this test using the command ./assets/node_modules/cypress/bin/cypress run but it will fail if your Phoenix server is not running. Try it again after starting the server with mix phx.server in another terminal window.

The All-in-one shell file

We want to run the tests with one command, both locally and on a CI server. I used the shell script suggested by https://www.alanvardy.com/post/phoenix-cypress-tests and modified them a bit. Create a file cypress-run.sh, make it executable (chmod +x cypress-run.sh) and put the following code into it:

 #!/bin/sh
+
+MIX_ENV=cypress mix ecto.reset
+echo "===STARTING PHX SERVER==="
+echo "===IF STARTING CYPRESS FAILS==="
+echo "===RUN npm install cypress --save-dev ==="
+echo "===IN THE assets/ FOLDER==="
+MIX_ENV=cypress mix phx.server &
+pid=$! # Store server pid
+echo "===WAITING FOR PHX SERVER==="
+until $(curl --output /dev/null --silent --head --fail http://localhost:4002); do
+    printf '.'
+    sleep 5
+done
+echo ""
+echo "===PHX SERVER RUNNING==="
+echo "===STARTING CYPRESS==="
+./assets/node_modules/.bin/cypress run
+result=$?
+kill -9 $pid # kill server
+echo "===KILLING PHX SERVER==="
+exit $result

As you might have noticed, the MIX_ENV is set to cypress. To create this env, we need the new configuration file config/cypress.exs:

use Mix.Config
+
+# Configure your database
+config :phonix, Phonix.Repo,
+  username: "postgres",
+  password: "postgres",
+  database: "phonix_cypress",
+  hostname: "localhost",
+  pool_size: 10
+
+config :phonix, PhonixWeb.Endpoint,
+  http: [port: 4002],
+  server: true
+
+# Print only warnings and errors during test
+config :logger, level: :warn

This approach is also copied from https://www.alanvardy.com/post/phoenix-cypress-tests. It’s a great idea to separate the test environment from the ui test environment. As Alan suggests, you could use a tool like ex_check which can run your normal tests and your ui tests in parallel, which is only possible if you use different databases and thus different environments.

We are using a different port in the cypress env (4002), so make sure to adjust your tests accordingly.

Now you can run your UI tests by executing ./cypress-run.sh. This script should run on your CI environment as well as locally. Just make sure to run npm install in your CI run!

What else?

I intend to write a second part, figuring out how to use fixtures or reset the database between tests. I saw a few blog posts on how to do this, utilizing sockets in phoenix to take commands. I don’t really like the approach and might come up with a way to work with the database directly. We’ll see!

]]>
+ Wed, 03 Feb 2021 17:30:00 +0000 + https://sgoettschkes.me/p/phoenix-testing-with-cypress.html + https://sgoettschkes.me/p/phoenix-testing-with-cypress.html + + +
+ + + <![CDATA[ Quickstart guide for clojure (compojure), clojurescript (figwheel), garden ]]> + Setting up new projects is always exciting, but if you have done it a few times, it’s getting old quick. I have set up a few projects in the last time and I believe I have a nice setup going which I’m about to share with everybody interested. There is nothing new in here and if you are a seasoned Clojure developer, you might not learn much. If you are just starting out or have some work done in Clojure but need a working setup or some input on your current setup, you are at the right place.

All my projects live inside Vagrant virtual machines. I’ll be using Clojure with Compojure and Ring, Clojurescript with figwheel and garden. I’m also throwing in the cooper library.

Clojure

All my web Clojure projects include compojure and the lein-ring plugin. Both are mature and work very well, so I didn’t look any further. My project.clj at this point looks like:

(defproject mycompany/myproject "0.1.0"
+  :dependencies [[compojure "1.6.0"]
+                 [org.clojure/clojure "1.8.0"]]
+  :main myproject.core
+  :min-lein-version "2.0.0"
+  :plugins [[lein-ring "0.12.1"]]
+  :ring {:auto-reload? true
+         :handler myproject.core/app
+         :open-browser? false
+         :reload-paths ["src/" "resources/"]}
+  :source-paths ["src/clj"])

The ring setup is important as it allows for auto-recompiling when run through lein ring server. open-browser is there because I run the project inside a vm, so there is no browser to open and instead of remembering to run lein ring server-headless every time, I disable it altogether.

ClojureScript

Adding Clojurescript is just a dependency away, especially if you don’t start with figwheel and other libraries but keep it plain and simple:

(defproject mycompany/myproject "0.1.0"
+  :cljsbuild {:builds [{:source-paths ["src/cljs"]
+                        :compiler {:optimizations :whitespace
+                                   :output-to "resources/public/js/main.js"
+                                   :output-dir "resources/public/js"}}]}
+  :dependencies [[compojure "1.6.0"]
+                 [org.clojure/clojure "1.8.0"]
+                 [org.clojure/clojurescript "1.9.946"]
+                 [ring/ring-core "1.6.2"]
+                 [selmer "1.11.2"]]
+  :main myproject.core
+  :min-lein-version "2.0.0"
+  :plugins [[lein-cljsbuild "1.1.7"]
+            [lein-ring "0.12.1"]]
+  :resource-paths ["resources"]
+  :ring {:auto-reload? true
+         :handler myproject.core/app
+         :open-browser? false
+         :reload-paths ["src/" "resources/"]}
+  :source-paths ["src/clj"])

lein-cljsbuild helps transpiling ClojureScript to Javascript by running lein cljsbuild once or lein cljsbuild auto.

Garden

I usually try to stick with the choices popular within the ecosystem. I have used Sass and Less in the past and was fine with both. Garden is another CSS precompiler, but you write your CSS as Clojure data structures, making it easy to integrate it within the ecosystem. There is nothing wrong with using another precompiler for CSS or writing plain CSS if the project calls for it.

For garden, you only need the dependency [garden "1.3.3"] as well as the plugin [lein-garden "0.3.0"]. After that, adding the garden config to your project.clj works like this:

  :garden {:builds [{:id "screen"
+                     :source-paths ["src/garden"]
+                     :stylesheet myproject.core/main
+                     :compiler {:output-to "resources/public/css/main.css"
+                                :pretty-print? false}}]}

As you can see, the garden source code goes into src/garden. Within src, there is also clj and cljs to split up the different parts (backend, frontend, CSS).

Figwheel

We glanced over figwheel when setting up ClojureScript. If you stopped right now and started working on your project, you’d need to wait for the ClojureScript compiler to generate the js on every change, then reload the website, navigate to where you left off and look at your changes.

With figwheel, only the parts of your ClojureScript app that changed get recompiled and these parts get pushed to the browser directly, which in turn exchanges the code parts so you see the changes directly.

With all this going on, figwheel was the first hurdle for me. Adding it was straightforward by adding [lein-figwheel "0.5.14"] to the plugin section of the project.clj. After that, the cljsbuild config needed to be changed:

  :cljsbuild {:builds [{:compiler {:asset-path "js/out"
+                                   :main "myproject.core"
+                                   :optimizations :none
+                                   :output-to "resources/public/js/main.js"
+                                   :output-dir "resources/public/js/out"}
+                        :figwheel {:websocket-host "myproject.local"}
+                        :id "dev"
+                        :source-paths ["src/cljs"]}]}

The websocket-host was needed because of the vm. I run the project through a hostname and not by mapping localhost ports. The second thing needed was the figwheel config itself:

  :figwheel {:css-dirs ["resources/public/css"]
+             :hawk-options {:watcher :polling}
+             :ring-handler myproject.core/app
+             :server-port 3000}

css-dirs is important to have hot code reloading for CSS as well. The hawk-options setting is needed because of the vm (again), as figwheel does not detect code changes (due to the way Vagrant mounts folders). By adding the ring-handler, the ring server is run when running figwheel, making it easier than running both processes in parallel.

Bonus points: Cooper

The next thing for me was to not have the need to run both lein figwheel and lein garden auto in two different shells. Luckily, there is cooper, which can be used to run many tasks in parallel. Add the lein plugin and a small config, and you are good to go:

  :cooper {"figwheel"  ["lein" "figwheel"]
+           "garden" ["lein" "garden" "auto"]}

After that, lein cooper figwheel garden got you covered.

Follow along

If you want to see these changes in full (without cooper right now), go over to https://github.com/Sgoettschkes/web-clj and step through the commits. You can see every code change in detail much better than I would ever be able to outline in this blog.

I’ll also be adding more (and writing a second blog post) on testing Clojure and ClojureScript as well as adding some example code to the repo. Stay tuned!

I’m also looking for feedback towards my setup, both in terms of libraries I used and the ways I plugged them together. You can either comment here, add an issue to the github repo or find me on social media as well as send me an email.

]]>
+ Fri, 03 Nov 2017 14:00:00 +0000 + https://sgoettschkes.me/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html + https://sgoettschkes.me/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html + + +
+ + + <![CDATA[ My personal state of Clojure ]]> + Almost a year ago I wrote my last blog post on this blog. It wasn’t planned, it just happened. I was busy doing other, unrelated things and while I sometimes had something to say, I never took the time to take it to my blog. As with many things which are not top priority, it got lost along the way.

As a comeback, I figured a personal look into Clojure might be interesting for some. Let’s go!

One year with Clojure

We started using Clojure for a new project (green-field, as some might say) at Blossom. We are building the project alongside running Blossom and other projects, which means progress is naturally slow. We also decided to look into Datomic as well, harming progress even further. But while we removed Datomic from our stack again, we keep Clojure and Clojurescript. Getting used to Clojure and Clojurescript took some time as well but now I feel good about the way I write code and I grew to like it a lot. I still got much to learn, but that’s ok as I like learning new things. Beside work, I used Clojure for 3 side projects (a Chatbot, an article-based website and a map-based web app).

Clojure as a language

I have worked with many languages over the years. Clojure feels more mature than many other “small” languages, both the language itself as well as the ecosystem. Leiningen is a great tool for managing dependencies and to build your project. Built on top of the JVM means that running the application in production is possible everywhere where you’d be able to run Java applications.

After some time to get used to the Clojure way, I am very fast implementing features in Clojure. Having the codebase split up into many small functions which are pure in most cases makes it easy to plug them together in new ways, remove a certain step or add another one. Testing is easy as well, given that Clojure works with data most of the time.

And, before the argument comes up: Sure, just having (pure) functions doesn’t protect you from a messy code base. You can have unreadable, untraceable code in almost any language. But my personal feeling is that by sticking to small functions and trying to keep them pure goes a long way towards good software.

The Clojars repository contains many packages and I haven’t had any problems finding the libraries I needed throughout the year. Having the possibility to add Maven dependencies or Jar files if no Clojure library is available adds to the ecosystem as well. Using Java libraries isn’t as smooth as the ones written in Clojure, but that’s mostly due to the underlying differences in thinking than a shortcoming of Clojure. And even though it sometimes feels weird, it works quite well.

The community

Clojure has a small, nice community. You can feel the excitement about the language at our local Usergroup as well as when talking about Clojure at conferences and with fellow developers. Many times I had people jumping in to help me with my problems and being nice. I also don’t feel like I am looked down on or anything if I don’t understand a certain concept. In languages like Haskell I always felt a little stupid for not knowing how to solve a certain problem “the functional way”. I didn’t have this feeling in Clojure.

As with any other small language, having a small community has drawbacks. Sometimes, you come across a problem nobody ever had. This is rare in the big languages as almost everything has been done. Sometimes, you are looking around and while it seems others have solved the problem, they didn’t openly discuss it. Again, it happens because of the smaller amount of people around.

Additionally, the opinion of a few does make a lot more noise in a small community. It did show recently as a negative blog post hit Hackernews and reddit and the community started discussing. I don’t think a post like this would hit the Javascript or Python community as hard. With this in mind, it becomes even more crucial to try to discuss in a civil manner than to attack. People are spending their free time so we all can benefit. Even if you don’t like or don’t agree, showing some decency and respect for the work goes a long way.

Getting paid

Beside Blossom, I do some freelancing and while I’d love to use Clojure there as well, I wasn’t able to find any opportunities yet. The market for Clojure developers seems small. I was surprised to see some Clojure job posts on Upwork and have applied there, without much success yet.

In Vienna, there are some companies using Clojure for their products. I am excited for their success stories and the inspiration for other companies to make the bold move and invest in a rather small language. I feel that with the Java interoperability the risks of such a move are minimized.

Should you drop everything else and learn Clojure? I don’t think so. Make sure to experiment with Clojure. If you like it and if you feel you could be more productive, try to incorporate it into existing projects or new ones. But don’t just try Clojure. See if Erlang/Elixir is for you, if you can get more done with another “big” language or if you are still happy with your language of choice.

I don’t see Clojure as a game changer, just as a nicer, cleaner way to express intent. But that’s a lot of what a language should foster, right?

]]>
+ Mon, 23 Oct 2017 10:00:00 +0000 + https://sgoettschkes.me/p/my-personal-state-of-clojure.html + https://sgoettschkes.me/p/my-personal-state-of-clojure.html + + +
+ + + <![CDATA[ (clojure 2) Getting used to ]]> + After “The setup“ and “The beginning“, my time with Clojure was somewhat limited due to pre-christmas stress. But this gave me time to reflect on what I learned so far, what I liked and didn’t like. I also attended the December Meetup by (clojure ‘vienna) and pair programmed Clojure for a few hours.

The ecosystem

I already said I liked the way Leiningen worked. Nothing has changed here. Leiningen feels stable and works great. I didn’t come across any issues using it, which is a great way for a language to work.

I didn’t find an editor/IDE I liked yet. I started with vim because it is what I know and universal enough. I also set up Atom and use it but don’t really like it. My biggest problem right now is that Clojure and Leiningen are installed inside a virtual machine, making many tools unusable. I’d love to be able to evaluate statements from my editor directly by running them inside the vm but it turns out that’s a bit of a problem. There are tools to do that, but the setup needed is just too much.

My current workflow is writing code in Atom, running it sshd into the vm and also having an additional ssh session running with the Leiningen repl started to test out different things.

The workflow

Programming in a functional language is just different than using a procedural language. It’s more about passing data around and rearranging it instead of maintaining a global state. It’s true that it’s much easier to reason about a small part of the software as the input and output is defined and can be tested.

When pairing on Day 9 of Advent of Code, my coding partner who is also new to Clojure mentioned that he can’t see himself working in the language due to this being so different than what he’s used to from other languages (mainly Java). While I understand what he means I honestly think that it’s not better or worse but different. It takes time to get used to it.

One thing I noticed was that testing was very easy because you can always split up big functions into smaller ones, making them easily testable. It was also easy to reason about the different parts. The only hard thing was to put together the plan on how to get started and which path to take. But this discussion was useful and it was good to have it in the beginning instead of moving along and “just doing things”.

The current status

The last weeks have been busy, so I didn’t dive deeper into Clojure. My next step is taking a book and working through it while on christmas holiday. After using Clojure in practice and learning a ton I feel like a bit more theoretical understanding would be good at this point.

I’d also like to write a small app in Clojure to see what this feels like. Advent of Code, Project Euler and others are great but they don’t mimic the real world. I realized with Advent of Code that I spent most of the time figuring out the algorithm, which does not help me getting any further with Clojure.

The meta

Beside working with Clojure, I also watched a few talks by Rich Hickey. I like his ideas and got the feeling he’s one of those people not buying into hype and “best practice” but asking hard questions and thinking for himself. I might not agree with everything he says and don’t agree with every decision in Clojure, but overall he seems very reasonable. That’s more than enough for me to get behind Clojure.

The next update will most likely be on the book I chose to work through and what I learned. And it might only get here after my holiday, so enjoy the holidays, relax and stay tuned :)

]]>
+ Wed, 21 Dec 2016 10:00:00 +0000 + https://sgoettschkes.me/p/clojure-2-getting-used-to.html + https://sgoettschkes.me/p/clojure-2-getting-used-to.html + + +
+ + + <![CDATA[ (clojure 1) The beginning ]]> + After “The setup“, I dove right into Clojure. As said before, I want to get to know Clojure by working on Advent of Code (AoC). I realize that coding puzzles and dojos and katas are not real world applications and one might miss certain things like performance or running an application in production. But they are small enough to provide fast feedback and to not hit a big wall. Even if I am not able to solve one, I can always skip that and keep going with another puzzle.

lein run & lein test

Before starting with real code, I created a project for AoC. lein new adventofcode, ran in the /vagrant folder inside my Vagrant vm created a scaffold of a Clojure project the way Leiningen decided it should be. Within this directory, the program can be run by lein run and tested with lein test.

I was again surprised by how easy this was. Everything works out of the box, no changes needed. The test fails, but that’s expected.

And if you want to play with Clojure a bit before writing real code, a quick lein repl brings up a nice repl you can use to get a feeling for the syntax.

Advent of Code, Day 1

Right after that first few minutes, I got into the code. Looking at the first day of AoC, it seemed plausible to see if I could get the input for the puzzle by downloading the file. A quick google search later it seemed fairly easy: Add a dependency to the code, use the library to request a url and get a string back. Adding the dependency is one line in the project.clj, requiring it is one more line and doing the request is again one line. That’s three lines. I was hooked!

Leiningen detects changes in the dependencies and downloads everything automatically once you run lein run or any other Leiningen command. I added all my code to the core.clj file because I didn’t want to figure out how to best distribute code between many files. I usually focus on one thing at a time when learning a new language!

Sadly, my code didn’t work. The reason was that the puzzle input needs login. I quickly decided that this would be too much of a hassle and put the puzzle input into a local txt file. Reading this file, I found the slurp function available in the clojure core which reads a file to a string. Again, very easy. It would be a problem with very big files but for now this was fine.

Solving the first day was a succession of this steps: Google for an isolated problem, find a function or way to solve it, continue.

Error messages

While solving the first day, it became clear to me that Clojure error messages are horrible. Seriously, they are the worst messages I have ever seen. Usually, they are just a Stack Trace from an Exception. Sometimes, they point to a specific line somewhere in there and sometimes it doesn’t even mention your code at all.

It took way longer than needed because I had a weird error I couldn’t solve because I had no idea where to look. I looked at every piece of the code and after I already thought the compiler was wrong I found the tiny little error.

A lot of errors are EOF problems because one of the many closing parentheses is missing. Again, there is no real hint where this is, so you again are on your own counting parentheses.

Done

It took me maybe 3 hours to get both parts of the puzzle for day 1. Given Clojure is a completely new language for me and I had a few problems along the way, I guess this is an ok time span. I had a misunderstanding in the puzzle which took some time as well, so overall I was pretty happy.

I’ll keep my impressions of the language for now and will get into them when I’m a little further down the road!

]]>
+ Thu, 08 Dec 2016 20:00:00 +0000 + https://sgoettschkes.me/p/clojure-1-the-beginning.html + https://sgoettschkes.me/p/clojure-1-the-beginning.html + + +
+ + + <![CDATA[ (clojure 0) The setup ]]> + In the last week I started playing with Clojure. The reason for this are some upcoming projects with the blossom Team. The stack will most likely be Clojure in the backend and ClojureScript in the frontend. So I better up my game. I started with the Advent of Code to have some real world exposure and not just write complicated “Hello, World” code. I’m planning on writing a series of blog posts, documenting my journey.

Vagrant

People who know me knew this was coming. I start every project in a clean vm managed by Vagrant to not clutter my laptop, remove stuff without a trace and not get caught in some weird dependency hell. I also got a vagrant setup for learning new stuff which is usually my go-to project when working on things like “Advent of Code”.

So the first step for me was to add Clojure to the tech stack inside the Vagrant machine. To be honest, I expected some problems. There usually are. The only real easy setup I ever had was Dart, where downloading the zip and extracting it is all you really need. With clojure, it was almost as easy. Make sure Java is installed, download the leiningen script, make sure it’s executable and in the path, done.

Clojure and Leiningen

Leiningen to me feels like a package manager on steroids and I love it. Using Leiningen to install Clojure is pure joy because it does it automatically. No need to figure anything out on your own. And it just works. I never installed something explicitly. If I add a dependency to my project.clj and run or test the project, Leiningen discovers the change and installs it automatically. I’m still amazed by how easy it feels to work with Leiningen.

Leiningen also creates a new project scaffolding if you need it. Perfect for newcomers who have no idea how a project setup in Clojure looks like lein new PROJECTNAME and lein got you covered. You can use Leiningen to run the project, test it and build it as needed.

Tests included

The new project also includes an incomplete test, urging you to fix it. This is great because it makes sure there is a working test setup included in the scaffolding, making it easier to get started with tests. Nobody can be forced to write tests, but the easier it is to get started the more likely developers are to pick it up. And it’s more fun as well!

In the next part I’ll talk about the first Advent of Code solutions as well as my feeling about Clojure after working with it a bit. Stay tuned ;)

]]>
+ Mon, 05 Dec 2016 13:00:00 +0000 + https://sgoettschkes.me/p/clojure-0-the-setup.html + https://sgoettschkes.me/p/clojure-0-the-setup.html + + +
+ + + <![CDATA[ An experiment ]]> + In the upcoming weeks, I’d like to do an experiment involving Pokemon Go and the in-game currency Pokecoins. In short: I’ll work with a specific client for 2 hours each week doing my usual work and I’ll use the resulting money to buy Pokecoins. This work will be done on top of my current work.

Client needed

I am currently looking for a new client I can work with. There are a few requirements that need to be met in order to be a fit:

  • I’ll only work for 2 hours/week with this client
  • The work is done async

Ideal work would be:

  • Small development work which isn’t time critical (PHP, Python, Dart, Javascript)
  • Code review and/or overall codebase check (PHP/Python only)
  • DevOps work like setting up a vagrant environment or improving an existing Ansible setup

As stated in the title, this is an experiment, which means that I’ll work at a rate that is lower than my usual hourly rate. Other than that, the client can expect my full commitment and my usual high quality work. If you know any company/person who would be interested in this, please forward my contact info which can be found at http://agileaddicts.com!

The experiment

The Pokemon Go hype got me! I’m not playing as much as a month ago (which might be because I was on holiday back then), but I invest quite some time into the game. If you are not familiar with the game, it’s essentially running around with your mobile phone and catching Pokemon that pop up. You can also fight with your Pokemon in gyms, which gives you a reward (Pokecoins) which you can then use to buy additional items. You can also buy Pokecoins for real money. 100 Pokecoins cost 1€ and it takes me about 1 1/2 hour to get 100 Pokecoins, which is also the maximum for one day (21 hours to be exact).

Doing the math, it’s insane to spend over 1 hour to “earn” something which is worth 1€. On the other hand, I play other games for hours which do not give me any “reward” and I do watch Netflix and I do various other activities which actually cost me money.

And this is where my experiment comes in. I’d like to use the money from the 2 hours a week on Pokecoins and see if this changes my experience. I didn’t spend any money on the game yet which means I can directly compare the two ways of playing. I’ll also make sure to reduce my playing time by 2 hours, so those 2 hours won’t come out of my sleeping time or other activities.

I expect that the two hours working will increase my Pokemon Go experience. The amount of coins will be a lot more than I could get in-game and can be used to make up for the two hours “lost” working instead of playing. Of course, there is the possibility that I’d rather play to earn those coins than work. Or it could be that not much changes.

I’ll of course update my blog with results!

]]>
+ Wed, 14 Sep 2016 07:00:00 +0000 + https://sgoettschkes.me/p/an-experiment.html + https://sgoettschkes.me/p/an-experiment.html + + +
+ + + <![CDATA[ Idempotent version updates with Ansible ]]> + If you are a seasoned Vagrant user, you know the problem around provisioning. If some software version changes, everybody needs to provision his or her machine again. Otherwise, things will fail eventually. You’ll also run into problems if running the provisioning twice fails for some reason (e.g. because a file already exists somewhere).

Idempotent setup

If you are setting up your provisioning, the first thing you need to make sure is that you can run the setup steps more than once and it works without manual steps in between. Nothing is more frustrating than running vagrant provision and being left with errors because something is already installed or a file is already at a specific place.

This is easy to do, even with a basic shell provisioner. You can check for the existence of files and test if some software is already installed. It’s also pretty easy to test: Just run the provisioning again and see if it works. If not, add appropriate tests and not run the offending commands.

Updating vs. optimizing

Getting the provisioning step to update software to the correct version is more tricky. It’s easy with package managers like composer for PHP or pip for Python where you have a file containing all dependencies and their versions and the package manager takes care of the rest. If you need to download, compile and install a library, you are faced with three very different options: You can do all the steps every time when provisioning, which takes time even though most of the time nothing changed. You can also install it once and then forget about it, which means nothing happens if a new version for a software is available. The third thing is figuring out the current version as well as the target version and only run the steps for installing it if they don’t match.

With Ansible, the Vagrant provisioner of choice for me, all three ways are possible. It’s important to be clear which one you are choosing so you know what drawbacks your solution might have.

To run some steps every time, give them no restriction like creates for commands and Ansible will do as told. If you are using the command or shell module, Ansible will run it every time. If your process is downloading some tar or zip and extracting it, maybe running a setup command as well, Ansible will download it every time and you’ll get new versions available under the url.

If you wanna run the whole process only once, use creates or similar instructions. Ansible will check if the file or directory is there and if it is, skip the step. As an example, let’s say you download some tar and extract it somewhere. The unarchive module takes a parameter called creates. If you set it to the extracted path, the command will do nothing if that folder is already there. As Ansible can’t possibly figure out if the version inside the tar is the same as the existing one, it won’t unarchive it even though the version changed.

Conditionals

If you want to run a command or a set of commands only if the installed version of a software differs from the target version, things are getting interesting. Let me walk you through that scenario. As an example, I’ll use the Dart SDK.

First, we need a command to get the current version installed. For Dart, that’s reading the version file which contains only the version string. Other tools might make this more complicated, e.g. the Google Cloud SDK, which prints a lot of information in addition to the version when running gcloud --version. I usually use some command line magic like sed or cut to extract the part I need. Using the shell module from ansible, I pipe them together to end up with the version being the stdout for that command. We need to use register to put the output of that task into a variable. The whole task could look like this:

- name: Read the dart version
+  shell: cat /usr/local/lib/dart-sdk/version
+  register: current_dart_version
+  ignore_errors: True
+  changed_when: dart_version != current_dart_version.stdout

As you can see, I ignore errors because the shell command might fail if dart is not installed. I use changed_when because I like a clean output from Ansible. You can also see a variable named dart_version which I did not mention yet. It’s simply a variable keeping the target dart version.

Next, let us download the Dart SDK if needed:

- name: download dart sdk
+  get_url:
+    dest=/tmp/dartsdk.zip
+    force=yes
+    url=https://storage.googleapis.com/dart-archive/channels/stable/release/{{ dart_version }}/sdk/dartsdk-linux-x64-release.zip
+  when: dart_version != current_dart_version.stdout

Again, we use the dart_version as a target both in the url and for the when clause which compares the stdout from the above command with our target. The last step is to extract the zip:

- name: extract dart sdk
+  unarchive:
+    copy=no
+    dest=/usr/local/lib
+    src=/tmp/dartsdk.zip
+  when: dart_version != current_dart_version.stdout

The same pattern repeats here as well. We only run the command if the target version is different from the current one. You can use this pattern for all kinds of installation and steps performed after the installation is done.

Getting fail-safe

Even though this seems very much all you can want from a provisioning step, if you have to take care of real servers in production, you might want to be even more cautious. In the example above, if some files are not present in the new SDK, they are not deleted as the extract command does not take care of this. We could work around this by first deleting the SDK.

In production, it might be valuable to not just replace a version but have two versions installed and switch a symlink or something to change over. This way, it’s not possible that a process has access to the software in an unpredictable state.

One tip regarding the version extraction: There are different ways to read the version! You might be able to read the version from some version-file or run the command with a COMMAND --version parameter. Look for different ways and see if some outputs only the exact version. And if you have to, using some regex with sed might not be the cleanest way but it does the job.

]]>
+ Mon, 29 Feb 2016 10:00:00 +0000 + https://sgoettschkes.me/p/idempotent-version-updates-with-ansible.html + https://sgoettschkes.me/p/idempotent-version-updates-with-ansible.html + + +
+ + + <![CDATA[ Dart, Vagrant and IDEs ]]> + Using Vagrant in order to have a reproducible development environment is second nature to me. All projects I work on have a Vagrantfile and it usually works great.

Packages and your IDE

One of the biggest problems when running your code inside a virtual machine is the IDE, at least for me. The code lives on my host machine and is synced into the vm, so I use an editor or IDE on my host. I might not have the needed dependencies on my host machine, so the IDE struggles to provide code completion (among other features which only work when the IDE can inspect the libraries used).

PyCharm (and many other JetBrains IDEs) has the ability to use a remote interpreter for Python. Pointing it to the Vagrant machine, PyCharm will ssh into the virtual machine and get the interpreter as well as all libraries installed through pip from there. This works rather well, so even though I don’t have flask installed on my host, I have code completion for the flask API and PyCharm tells me if I am missing an argument. If I use Python 3 for my project and it’s installed in the vm, I don’t need it on my host at all (In fact, I don’t need any Python installed, which is nice if you happen to develop on Windows).

Dart and the packages symlinks

The Dart plugin for the JetBrains IDEs does not have such a feature. It relies on the Dart SDK installed on your host. I wouldn’t mind installing Dart, but there are some problems:

  • Different projects might use different versions of the Dart SDK. Keeping different versions of the SDK is tedious.
  • pub, the Dart package manager, downloads packages to ~/.pub-cache. All dependencies need to be downloaded twice to be available inside the vm (to execute the code) and on the host (so the IDE can access it). You could sync your local pub-cache folder into the vm to get around this issue.
  • pub creates symlinks in your project to reference packages. In your project root, it creates a packages folder which contains symlinks to each package. The paths are absolute, so each time you run it inside the vm, it breaks the lookup on the host and vice versa.

I didn’t find a solution to my first problem, but could solve the other two!

.packages to the rescue

As far as the .pub-cache folder is concerned, there is a third solution beside downloading twice and syncing your pub cache: pub looks up the PUB_CACHE environment variable and if it contains a valid path, it puts the dependencies there. This way, you can store all your dependencies in your project root in a .pub-cache (or whatever you wanna call it) folder. It’s synced to your host automatically, so no need to install the same libraries twice.

The symlinks are a bigger problem, but if you are using Dart 1.12 or higher, there is a way around. By using --no-package-symlinks as an argument to pub get (or pub upgrade), no symlinks are created. Instead, a single .packages file is used to store all links for the packages needed. Sadly, these links are again absolute. But with a little bit of Dart magic (ok, regex magic), you can change those to be relative and work both on the vm and the host.

If you are using Dart before 1.12, you can of course try to rewrite the symlinks which might work as well!

The rough edges

Because the PUB_CACHE environment variable needs to be set correctly, I only run pub get and pub upgrade inside the vm where my provisioning takes care of putting everything in place. On my host, the PUB_CACHE might point to a different project, messing everything up.

One also needs to remember to run pub get with the correct argument and running the script to replace the links to be relative. If you are using some task runner (like grinder), this is no issue as you have your task and it takes care of doing all the steps needed. If you are still running tasks manually, well, you might want to consider using a task runner.

The state of packages managers

While this was intended as a practical post about Dart in the vm and IDEs, there is an underlying question: Why is it so complicated? Turns out that package management is complicated. Every package manager I have worked with has some problems. NPM (for Node.js) downloads every dependency once for each library that depends on it. There is no sharing going on, even if two libraries in one project depend on the exact same version.

pip (for Python) and gem (for ruby) install packages globally and rely on external solutions (or workarounds) to install dependencies locally for every project. composer (for PHP) installs everything locally but only once, making it the best package manager I worked with in the past. If you work on a lot of projects and all depend on the same set of libraries, there is a certain overhead in downloading those libraries for each project though!

pub seems to take into account all those problems: it installs each version needed of each library globally (if you don’t mess with the PUB_CACHE), which means you can have different projects use different versions of the same library OR use the same library without storing it twice. Of course, within one project you can only use one version of each library, so “dependency hell” is still possible.

The perfect solution?

The best solution would of course be if the Dart plugin could use a remote SDK. I doubt this will come anytime soon (I didn’t see anybody ranting about it anyway). The solution outlined above should also work with any IDE capable of using the .packages file for Dart.

Having a working solution in place makes working on Dart projects which run inside a Vagrant vm much nicer. You should try it!

]]>
+ Thu, 21 Jan 2016 10:00:00 +0000 + https://sgoettschkes.me/p/dart-vagrant-and-ides.html + https://sgoettschkes.me/p/dart-vagrant-and-ides.html + + +
+ +
+
diff --git a/hashtags.html b/hashtags.html new file mode 100644 index 0000000..0bc011b --- /dev/null +++ b/hashtags.html @@ -0,0 +1,84 @@ + + + + + Hashtags; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +

Sebastian Göttschkes

+

+ + +

What do these 5 hashtags in your bio mean?

+ +

Great question! Back when Diaspora was a thing, instead of a bio or about text you would add 5 hashtags most relevant to you. I fell in love with this way of focusing on the most important things. That's why they show up in my profile all over the web.

+ +

I try to keep them up to date, but as with everything, there might be an old set of hashtags out there somewhere. Consider it a glimpse into my past.

+ +
+ + + + diff --git a/img/2024-01-01-building-onesen-in-public.png b/img/2024-01-01-building-onesen-in-public.png new file mode 100644 index 0000000..12ff6e2 Binary files /dev/null and b/img/2024-01-01-building-onesen-in-public.png differ diff --git a/index.html b/index.html new file mode 100644 index 0000000..4435f85 --- /dev/null +++ b/index.html @@ -0,0 +1,262 @@ + + + + + Coding 5 to 9; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +

Sebastian Göttschkes

+

+ + + +
+

Written by Sebastian on Jan 01, 2023 in dev

+

Building OneSen in public

+

A unique visual representation of the 'OneSen' note-taking app

+

Read more

+
+ +
+

Written by Sebastian on Feb 03, 2021 in dev

+

Phoenix UI testing with Cypress, Part 1

+

I still remember the days I tried to achieve UI testing with Selenium, PhantomJS and various other tools. It was a hassle. It didn’t run on CI because it needed some kind of window manager. It was unstable.

+

Read more

+
+ +
+

Written by Sebastian on Nov 03, 2017 in dev

+

Quickstart guide for clojure (compojure), clojurescript (figwheel), garden

+

Setting up new projects is always exciting, but if you have done it a few times, it’s getting old quick. I have set up a few projects in the last time and I believe I have a nice setup going which I’m about to share with everybody interested. There is nothing new in here and if you are a seasoned Clojure developer, you might not learn much. If you are just starting out or have some work done in Clojure but need a working setup or some input on your current setup, you are at the right place.

+

Read more

+
+ +
+

Written by Sebastian on Oct 23, 2017 in dev

+

My personal state of Clojure

+

Almost a year ago I wrote my last blog post on this blog. It wasn’t planned, it just happened. I was busy doing other, unrelated things and while I sometimes had something to say, I never took the time to take it to my blog. As with many things which are not top priority, it got lost along the way.

+

Read more

+
+ +
+

Written by Sebastian on Dec 21, 2016 in dev

+

(clojure 2) Getting used to

+

After “The setup“ and “The beginning“, my time with Clojure was somewhat limited due to pre-christmas stress. But this gave me time to reflect on what I learned so far, what I liked and didn’t like. I also attended the December Meetup by (clojure ‘vienna) and pair programmed Clojure for a few hours.

+

Read more

+
+ +
+

Written by Sebastian on Dec 08, 2016 in dev

+

(clojure 1) The beginning

+

After “The setup“, I dove right into Clojure. As said before, I want to get to know Clojure by working on Advent of Code (AoC). I realize that coding puzzles and dojos and katas are not real world applications and one might miss certain things like performance or running an application in production. But they are small enough to provide fast feedback and to not hit a big wall. Even if I am not able to solve one, I can always skip that and keep going with another puzzle.

+

Read more

+
+ +
+

Written by Sebastian on Dec 05, 2016 in dev

+

(clojure 0) The setup

+

In the last week I started playing with Clojure. The reason for this are some upcoming projects with the blossom Team. The stack will most likely be Clojure in the backend and ClojureScript in the frontend. So I better up my game. I started with the Advent of Code to have some real world exposure and not just write complicated “Hello, World” code. I’m planning on writing a series of blog posts, documenting my journey.

+

Read more

+
+ +
+

Written by Sebastian on Sep 14, 2016 in freelance

+

An experiment

+

In the upcoming weeks, I’d like to do an experiment involving Pokemon Go and the in-game currency Pokecoins. In short: I’ll work with a specific client for 2 hours each week doing my usual work and I’ll use the resulting money to buy Pokecoins. This work will be done on top of my current work.

+

Read more

+
+ +
+

Written by Sebastian on Feb 29, 2016 in dev

+

Idempotent version updates with Ansible

+

If you are a seasoned Vagrant user, you know the problem around provisioning. If some software version changes, everybody needs to provision his or her machine again. Otherwise, things will fail eventually. You’ll also run into problems if running the provisioning twice fails for some reason (e.g. because a file already exists somewhere).

+

Read more

+
+ +
+

Written by Sebastian on Jan 21, 2016 in dev

+

Dart, Vagrant and IDEs

+

Using Vagrant in order to have a reproducible development environment is second nature to me. All projects I work on have a Vagrantfile and it usually works great.

+

Read more

+
+ +
+

Written by Sebastian on Dec 11, 2015 in dev

+

Google App Engine Remote API within iPython Notebooks

+

At blossom, we are running on Google App Engine (GAE). It’s nice to stand on the shoulder of giants and don’t have to worry about servers at all. GAE takes care of scaling up and down for us, handles our database as well as storage and gives us great insight into our production environment.

+

Read more

+
+ +
+

Written by Sebastian on Sep 20, 2015 in productivity

+

What works for me

+

I struggle like most people with getting things done. I have read books and a lot of blog posts. Most didn’t work. I have implemented some of the things David Allen suggests in “Getting things done”. I am keeping my inbox at zero most days with the tips from Andreas Klinger. But that’s about it. Nothing else ever worked - 5/3/1 tasks for the day, Pomodoro, keeping hand-written task lists, using various apps.

+

Read more

+
+ +
+

Written by Sebastian on Sep 13, 2015 in dev

+

Deploying to PythonAnywhere with TravisCI

+

Many of you might already know it: Soon I’ll be joining the blossom Team on their quest to project management awesomeness. The current development stack of blossom consists of Python and Dart running on Google App Engine. In order to get some experience with both Python and Dart, I started a small side project which is a perfect combination of my two main interests, development and music.

+

Read more

+
+ +
+

Written by Sebastian on Sep 04, 2015 in Community

+

Just ask!

+

What did you do last time you had a problem? The last time you couldn’t figure something out? Some people quit. Others try to force a solution on their own, applying insane amounts of time towards a problem. Often though, asking questions to the right people is the way to go.

+

Read more

+
+ +
+

Written by Sebastian on Aug 21, 2015 in dev

+

PhantomJS 2 on Wheezy and TravisCI

+

After playing with Dart for a little while, I also looked into testing with Dart and learned that the test library can use PhantomJS to test the code that interacts with the DOM. Being a testing junkie, I wanted to give this a try but learned that I needed PhantomJS 2 to get it working. Beside running it in my VM, I also wanted to have it running on TravisCI. This blog post tells you how to set up both.

+

Read more

+
+ +
+

Written by Sebastian on Jul 30, 2015 in health

+

My first few days with Jake

+

As announced on twitter, last week I ordered a batch of Jake. It arrived on Wednesday and I had to try it out the same evening. I’d like to share some thoughts about it and reactions I received. It’s only been a few days, so this is not some extensive review of the product or anything.

+

Read more

+
+ +
+

Written by Sebastian on Jul 25, 2015 in dev

+

Deploying a Jekyll website to Github Pages using TravisCI

+

Today I switched over this blog to be subject to continuous deployment. Whenever I push a new commit (which might be a design change, new blog post like this or some small change), everything is built and automatically pushed to Github Pages, which hosts this blog.

+

Read more

+
+ +
+

Written by Sebastian on Mar 25, 2015 in dev

+

Running Ansible devel (on Codeship)

+

If you are tasked with managing servers, you might have read about Ansible. If you do not know it, here is a quick intro: With Ansible you can define tasks which should be run on your (remote) hosts and Ansible takes those tasks, opens a ssh connection into your hosts and executes them.

+

Read more

+
+ +
+

Written by Sebastian on Jan 27, 2015 in dev

+

Vagrant base boxes

+

If you are using vagrant on a daily basis, you might already be using something else than the base box suggested by vagrant (which would be the hashicorp/precise32 or hashicorp/precise64). If you are thinking about creating your own base boxes or are interested in the topic, read on. If you have no idea what I am talking about, the vagrant documentation can tell you more about Boxes.

+

Read more

+
+ +
+

Written by Sebastian on Jan 18, 2015 in dev

+

Developing cllctr

+

Over the past two months, I have been building a new weekend project: cllctr. It’s a CD database you can use to store information about your CDs. Say goodbye to excel lists and over-the-top stuff like discogs or collectorz. cllctr is focused on the right amount of data, balancing the time needed to enter new CDs or organize an existing collection.

+

Read more

+
+ +
+

Written by Sebastian on Oct 28, 2014 in dev

+

Vagrant and tmpfs

+

When I am talking about Vagrant at usergroups and conferences, one thing I always mention is that shared folders are slow, especially with Virtualbox. While it’s true for reading to some extent, the biggest bottleneck I experience is writing to the shared folder. This happens a lot with cache and logs in development mode when you use e.g. symfony2. These folders live inside the project dir and depending on your project structure you might not be able to redirect them to some folder inside the vm.

+

Read more

+
+ +
+

Written by Sebastian on Aug 01, 2014 in community

+

Join us at ViennaPHP

+

If you are a PHP developer who lives in Vienna, you might have heard of ViennaPHP. It’s a local usergroup organized by Stefan Hupe and myself to bring together the PHP community.

+

Read more

+
+ +
+

Written by Sebastian on Jun 28, 2014 in dev

+

Jekyll: Update your sitemap automatically with rake

+

If you write your blog to also be found through Google, you may have a sitemap which makes it easy for Google and Bing to crawl your page. It might be a good idea to inform both if this sitemap changes so they can send their crawlers your way and update their index with the great stuff you just put on there. This can be done by doing a GET request and passing the url to your sitemap as a parameter. Easy as pie, right?

+

Read more

+
+ +
+

Written by Sebastian on May 31, 2014 in tools

+

Backups with Obnam

+

You know you should do backups, right? Taking all that data you store on your laptop and put it somewhere safe. There are so many ways of doing backups but people still don’t do regular backups. Maybe you put the data on an external drive and swear you’ll do this every month from now on. Chances are you’ll be too busy next month and the month after. And you still got a backup, right?

+

Read more

+
+ +
+

Written by Sebastian on May 23, 2014 in blogging

+

From Phrozn to Jekyll

+

As you might know by now, I like static site generators. They combine the flexibility of a templating system with the easy deployment of static pages. Creating pages feels like working in a programming language and deploying the page in the end is as easy as putting some html files on an ftp (which you shouldn’t do, of course). There is no need for a complicated setup or deployment process because it’s just some HTML served from nginx (or any other webserver). You can put the files on Amazon S3, your own host or github pages. And as it’s only static files being served, performance is as good as it gets.

+

Read more

+
+ +
+

Written by Sebastian on May 16, 2014 in general

+

Hello World!

+

I wanted to do this for a long time. Write a “Hello World” on a new blog. So here it is. Awesome! But this is not a normal “Hello world” trying to get my feet wet, this is a post on why one should avoid blogging platforms like Blogspot, where my blog lived up until now.

+

Read more

+
+ + +
+ + + + diff --git a/now.html b/now.html new file mode 100644 index 0000000..c99a917 --- /dev/null +++ b/now.html @@ -0,0 +1,96 @@ + + + + + Now; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +

Sebastian Göttschkes

+

+ + +

This page is part of the now page movement!

+ +

What I'm doing these days

+ + + +

Last updated in July 2024.

+ +

+ + + + diff --git a/p/an-experiment.html b/p/an-experiment.html new file mode 100644 index 0000000..f3030b5 --- /dev/null +++ b/p/an-experiment.html @@ -0,0 +1,80 @@ + + + + + An experiment; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Sep 14, 2016 in Freelance

+

An experiment

+

In the upcoming weeks, I’d like to do an experiment involving Pokemon Go and the in-game currency Pokecoins. In short: I’ll work with a specific client for 2 hours each week doing my usual work and I’ll use the resulting money to buy Pokecoins. This work will be done on top of my current work.

Client needed

I am currently looking for a new client I can work with. There are a few requirements that need to be met in order to be a fit:

Ideal work would be:

As stated in the title, this is an experiment, which means that I’ll work at a rate that is lower than my usual hourly rate. Other than that, the client can expect my full commitment and my usual high quality work. If you know any company/person who would be interested in this, please forward my contact info which can be found at http://agileaddicts.com!

The experiment

The Pokemon Go hype got me! I’m not playing as much as a month ago (which might be because I was on holiday back then), but I invest quite some time into the game. If you are not familiar with the game, it’s essentially running around with your mobile phone and catching Pokemon that pop up. You can also fight with your Pokemon in gyms, which gives you a reward (Pokecoins) which you can then use to buy additional items. You can also buy Pokecoins for real money. 100 Pokecoins cost 1€ and it takes me about 1 1/2 hour to get 100 Pokecoins, which is also the maximum for one day (21 hours to be exact).

Doing the math, it’s insane to spend over 1 hour to “earn” something which is worth 1€. On the other hand, I play other games for hours which do not give me any “reward” and I do watch Netflix and I do various other activities which actually cost me money.

And this is where my experiment comes in. I’d like to use the money from the 2 hours a week on Pokecoins and see if this changes my experience. I didn’t spend any money on the game yet which means I can directly compare the two ways of playing. I’ll also make sure to reduce my playing time by 2 hours, so those 2 hours won’t come out of my sleeping time or other activities.

I expect that the two hours working will increase my Pokemon Go experience. The amount of coins will be a lot more than I could get in-game and can be used to make up for the two hours “lost” working instead of playing. Of course, there is the possibility that I’d rather play to earn those coins than work. Or it could be that not much changes.

I’ll of course update my blog with results!

+
+ +
+ + + + diff --git a/p/backups-with-obnam.html b/p/backups-with-obnam.html new file mode 100644 index 0000000..e6aa177 --- /dev/null +++ b/p/backups-with-obnam.html @@ -0,0 +1,80 @@ + + + + + Backups with Obnam; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on May 31, 2014 in Tools

+

Backups with Obnam

+

You know you should do backups, right? Taking all that data you store on your laptop and put it somewhere safe. There are so many ways of doing backups but people still don’t do regular backups. Maybe you put the data on an external drive and swear you’ll do this every month from now on. Chances are you’ll be too busy next month and the month after. And you still got a backup, right?

I was like this. I wrote down a plan when to backup on an external drive and even to DVDs, what to backup and in which intervals. It was a great plan. But it didn’t take into account that when the time comes to sit down and do the backup, other things are more important. Because there is still the last backup, right. And how often does the hard drive die after all! And there haven’t been that many changes to the data I want to backup, so it doesn’t make sense.

Obnam to the rescue

With this experience in mind, I looked for some tools. On Mac, you got Time Machine shipped with your OS, which is great. Many Linux distributions ship some tool for backup and restoring as well, and there is always rsync to do the job if nothing else is available. On Windows you have some basic functionality built in and a ton of tools to choose from. Running Linux, I decided to give Obnam a try. With the ability to do backups to hard drives and over sftp it fits my idea of putting backups on external drives as well as some remote server.

Obnam also has a nice concept of storing chunks of data which are automatically deduplicated, meaning every backup is a full backup but only new data needs to be transferred and takes up space. Every backup is a generation and you can decide which generations to keep and which are obsolete after a while. Obnam also makes it easy to encrypt your backup if you wish to do so. You’ll need a pgp key and tell Obnam where to find it, but everything else is automated.

Baby steps

Installing it on my Linux Mint was fairly easy. Mint uses Debian under the hood, so I could use apt (after adding the liw repository to my sources) to install the Obnam package which resolves all dependencies for me. That gave me the obnam command which is used to do backups and restores and work with generations of backups.

The most basic command to do a backup would be obnam backup -r /path/to/repository /path/which/should/be/backed/up. This creates a so called backup repository (which is a folder used by Obnam to store all data) at /path/to/repository and backup all data in path/which/should/be/backed/up. Also note that the repository can be in a remote location you have ssh access to by using sftp: sftp://user@host:/path/to/repository. If you want to backup different directories, you can specify as many as you want as the last parameters: obnam backup -r /some/repo /home/user/dir1 /home/user/dir2 /home/user/dir3.

Obnam takes arguments to not include cache directories into the backup (--exclude-caches) and to place a log at a location you choose where you can see what gets included in the backup (--log /path/to/logfile.log). There are more arguments for various tasks. If you want to exclude specific files or folders, you can do this as well. This works with regular expressions and I didn’t really get it to work, so now I rather include the stuff I want than exclude the stuff I don’t need. Works pretty well.

Also note that you can create a configuration file for Obnam to use. As I am doing my backups through a bash script, I don’t care passing a few more parameters - If you intend on using Obnam manually, you might wanna look into setting some parameters in the configuration file so you don’t need to repeat yourself.

Lets script it!

With the tool at hand, I started writing a script which does a backup of my home directory to one of my external hard drives. After checking if the hard drive is connected, all there is left is the backup command for Obnam and the command to let Obnam forget previous backups if they are too old. You can tell Obnam which backups you want to keep and it will forget all the others. This is pretty neat as you don’t need all those old ones around.

I decided to use the strategy often used when doing server backups: Keep one backup for the last 30 days each, one backup for each of the past 8 weeks and one for each of the last 14 months. This works well with the need to restore (you might remember you had a particular file last Wednesday but you won’t remember if you had it 6 months or 7 months ago) as well as with the amount of space your backups will consume. The command to let Obnam forget those older backups is: obnam forget --keep="30d,8w,14m" --repository /path/to/repo.

As I want to use different backup locations, I also added a parameter the script takes which decides which repository to use, which directories to backup and what to forget. You can have a look at the current version of the script in my dotfiles repository: https://github.com/Sgoettschkes/dotfiles/blob/master/bin/backup

Afterthoughts

For the first time, I have a working backup solution in place. I have thought of different ways I could lose my data and my current backup solution makes sure I have a backup in any case. I can lose my laptop or even have a fire destroy my home office and be good backup-wise. At the same time, if my server loses my backups as well, I still have an (older) backup on an external drive which is stored at a different location and will only be transferred to my laptop for backups every so often.

I didn’t talk about restoring those data, but I’ll do that in a second post. A backup you cannot restore is useless, so testing your backups is a good idea. And, like backups, this isn’t done as often as it should because it’s work nobody wants to do. With an (automatic) script you can take that work away and be sure of your backups working. I also skipped over encryption, because I don’t use it currently. I might look into this in the future. If I decide to do so, I’ll write an additional blog post.

As for Obnam itself, it’s a great tool which does one thing and does it right. It has good documentation and it seems the developers have thought about the most common use cases and made them work flawlessly. Being a commandline tool it’s also easy to create a script which does all the hard work and have you sit back, relax and enjoy all the other things in life - Not worried about your data anymore.

+
+ +
+ + + + diff --git a/p/building-onesen-in-public.html b/p/building-onesen-in-public.html new file mode 100644 index 0000000..9d60c0b --- /dev/null +++ b/p/building-onesen-in-public.html @@ -0,0 +1,80 @@ + + + + + Building OneSen in public; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jan 01, 2023 in Dev

+

Building OneSen in public

+

A unique visual representation of the 'OneSen' note-taking app

It’s been a while since I posted on my blog. Almost two years, to be precise. I was focusing my energy on other parts of my life, and, to be honest, I never got back into publishing regular blog posts after falling off the wagon in 2017. But now I am back, and I intend to publish more often.

Building OneSen

Last week, I started a new side project. It’s called OneSen, and currently, it’s a text field you can use to store your daily notes. In the future, I envision it to be a note-taking app focusing on daily notes (much like micro-blogging) and simplicity. I also have ideas to add the capabilities of LLMs to enrich the experience and get insights into your notes. The alpha version, without a design, text, or explanations, can be found at https://onesen.app. But it works 😉

Build in public

My current plan is to build this app in public. I like the idea of (radical) transparency, which is present when being open about everything happening. I like to teach others what works for me and what doesn’t. I like to inspire others to build something on their own, make their work more public, or share more. We can all benefit from the experiences others have already had.

The idea

To be transparent, I have to give credit where credit is due. The idea for OneSen came from Dainius, who mentioned that he’d like a straightforward interface to take notes and have them stored daily, much like a physical notebook with its pages dedicated to a specific date.

I let the idea sit, and because I thought about it more over time, I decided to act on it. I kept the notebook analogy since the concept closely relates to physical notebooks. Currently, each notebook contains pages, one page for each day. A user can only change the current day, and the system saves changes automatically when the text changes. It’s as easy as possible. Go to your notebook, write something, and leave.

The MVP

My current plan for the MVP is to add a design, make it possible to view past dates and rename notebooks. Those changes should be enough to make it usable and see if people want something like this.

The long term vision

OneSen could go in many directions. One idea I had was the “One second a day” video app for writing. Write something daily and combine it per week, month, or year.

Another idea was to add LLM capabilities, analyze notes, and gather insights using AI. This feature could be interesting for people writing morning pages or journals.

I am also thinking about encrypting notes on the client, adding security that almost no other note-taking app has. The drawback would be that working with notes would need to happen on the client since even searching would not be possible with encrypted notes.

+
+ +
+ + + + diff --git a/p/clojure-0-the-setup.html b/p/clojure-0-the-setup.html new file mode 100644 index 0000000..8cce976 --- /dev/null +++ b/p/clojure-0-the-setup.html @@ -0,0 +1,80 @@ + + + + + (clojure 0) The setup; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Dec 05, 2016 in Dev

+

(clojure 0) The setup

+

In the last week I started playing with Clojure. The reason for this are some upcoming projects with the blossom Team. The stack will most likely be Clojure in the backend and ClojureScript in the frontend. So I better up my game. I started with the Advent of Code to have some real world exposure and not just write complicated “Hello, World” code. I’m planning on writing a series of blog posts, documenting my journey.

Vagrant

People who know me knew this was coming. I start every project in a clean vm managed by Vagrant to not clutter my laptop, remove stuff without a trace and not get caught in some weird dependency hell. I also got a vagrant setup for learning new stuff which is usually my go-to project when working on things like “Advent of Code”.

So the first step for me was to add Clojure to the tech stack inside the Vagrant machine. To be honest, I expected some problems. There usually are. The only real easy setup I ever had was Dart, where downloading the zip and extracting it is all you really need. With Clojure, it was almost as easy. Make sure Java is installed, download the leiningen script, make sure it’s executable and in the path, done.

Clojure and Leiningen

Leiningen to me feels like a package manager on steroids and I love it. Using Leiningen to install Clojure is pure joy because it does it automatically. No need to figure anything out on your own. And it just works. I never installed something explicitly. If I add a dependency to my project.clj and run or test the project, Leiningen discovers the change and installs it automatically. I’m still amazed by how easy it feels to work with Leiningen.

Leiningen also creates a new project scaffolding if you need it. Perfect for newcomers who have no idea how a project setup in Clojure looks like lein new PROJECTNAME and lein got you covered. You can use Leiningen to run the project, test it and build it as needed.

Tests included

The new project also includes an incomplete test, urging you to fix it. This is great because it makes sure there is a working test setup included in the scaffolding, making it easier to get started with tests. Nobody can be forced to write tests, but the easier it is to get started the more likely developers are to pick it up. And it’s more fun as well!

In the next part I’ll talk about the first Advent of Code solutions as well as my feeling about Clojure after working with it a bit. Stay tuned ;)

+
+ +
+ + + + diff --git a/p/clojure-1-the-beginning.html b/p/clojure-1-the-beginning.html new file mode 100644 index 0000000..0684916 --- /dev/null +++ b/p/clojure-1-the-beginning.html @@ -0,0 +1,80 @@ + + + + + (clojure 1) The beginning; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Dec 08, 2016 in Dev

+

(clojure 1) The beginning

+

After “The setup“, I dove right into Clojure. As said before, I want to get to know Clojure by working on Advent of Code (AoC). I realize that coding puzzles and dojos and katas are not real world applications and one might miss certain things like performance or running an application in production. But they are small enough to provide fast feedback and to not hit a big wall. Even if I am not able to solve one, I can always skip that and keep going with another puzzle.

lein run & lein test

Before starting with real code, I created a project for AoC. lein new adventofcode, ran in the /vagrant folder inside my Vagrant vm created a scaffold of a Clojure project the way Leiningen decided it should be. Within this directory, the program can be run by lein run and tested with lein test.

I was again surprised by how easy this was. Everything works out of the box, no changes needed. The test fails, but that’s expected.

And if you want to play with Clojure a bit before writing real code, a quick lein repl brings up a nice repl you can use to get a feeling for the syntax.

Advent of Code, Day 1

Right after that first few minutes, I got into the code. Looking at the first day of AoC, it seemed plausible to see if I could get the input for the puzzle by downloading the file. A quick google search later it seemed fairly easy: Add a dependency to the code, use the library to request a url and get a string back. Adding the dependency is one line in the project.cls, requiring it is one more line and doing the request is again one line. That’s three lines. I was hooked!

Leiningen detects changes in the dependencies and downloads everything automatically once you run lein run or any other Leiningen command. I added all my code to the core.clj file because I didn’t want to figure out how to best distribute code between many files. I usually focus on one thing at a time when learning a new language!

Sadly, my code didn’t work. The reason was that the puzzle input requires a login. I quickly decided that this would be too much of a hassle and put the puzzle input into a local txt file. Reading this file, I found the slurp function available in the clojure core which reads a file to a string. Again, very easy. It would be a problem with very big files but for now this was fine.

Solving the first day was a succession of this steps: Google for an isolated problem, find a function or way to solve it, continue.

Error messages

While solving the first day, it became clear to me that Clojure error messages are horrible. Seriously, they are the worst messages I have ever seen. Usually, they are just a Stack Trace from an Exception. Sometimes, they point to a specific line somewhere in there and sometimes it doesn’t even mention your code at all.

It took way longer than needed because I had a weird error I couldn’t solve because I had no idea where to look. I looked at every piece of the code and after I already thought the compiler was wrong I found the tiny little error.

A lot of errors are EOF problems because one of the many closing parentheses is missing. Again, there is no real hint where this is so you again are on your own counting parentheses.

Done

It took me maybe 3 hours to get both parts of the puzzle for day 1. Given Clojure is a completely new language for me and I had a few problems along the way, I guess this is an ok time span. I had a misunderstanding in the puzzle which took some time as well, so overall I was pretty happy.

I’ll keep my impressions of the language for now and will get into them when I’m a little further down the road!

+
+ +
+ + + + diff --git a/p/clojure-2-getting-used-to.html b/p/clojure-2-getting-used-to.html new file mode 100644 index 0000000..01916dd --- /dev/null +++ b/p/clojure-2-getting-used-to.html @@ -0,0 +1,80 @@ + + + + + (clojure 2) Getting used to; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Dec 21, 2016 in Dev

+

(clojure 2) Getting used to

+

After “The setup“ and “The beginning“, my time with Clojure was somewhat limited due to pre-christmas stress. But this gave me time to reflect on what I learned so far, what I liked and didn’t like. I also attended the December Meetup by (clojure ‘vienna) and pair programmed Clojure for a few hours.

The ecosystem

I already said I liked the way Leiningen worked. Nothing has changed here. Leiningen feels stable and works great. I didn’t come across any issues using it, which is a great way for a language to work.

I didn’t find an editor/IDE I liked yet. I started with vim because it is what I know and universal enough. I also set up Atom and use it but don’t really like it. My biggest problem right now is that Clojure and Leiningen are installed inside a virtual machine, making many tools unusable. I’d love to be able to evaluate statements from my editor directly by running them inside the vm but it turns out that’s a bit of a problem. There are tools to do that, but the setup needed is just to much.

My current workflow is writing code in Atom, running it sshd into the vm and also having an additional ssh session running with the Leiningen repl started to test out different things.

The workflow

Programming in a functional language is just different than using a procedural language. It’s more about passing data around and rearranging it instead of maintaining a global state. It’s true that it’s much easier to reason about a small part of the software as the input and output is defined and can be tested.

When pairing on Day 9 of Advent of Code, my coding partner who is also new to Clojure mentioned that he can’t see himself working in the language due to this being so different than what he’s used to from other languages (mainly Java). While I understand what he means I honestly think that it’s not better or worse but different. It takes time to get used to it.

One thing I noticed was that testing was very easy because you can always split up big function into smaller ones, making them easily testable. It was also easy to reason about the different parts. The only hard thing was to put together the plan on how to get started and which path to take. But this discussion was useful and it was good to have it in the beginning instead of moving along and “just doing things”.

The current status

The last weeks have been busy, so I didn’t dive deeper into Clojure. My next step is taking a book and working through it while on christmas holiday. After using Clojure in practice and learning a ton I feel like a bit more theoretical understanding would be good at this point.

I’d also like to write a small app in Clojure to see what this feels like. Advent of Code, Project Euler and others are great but they don’t mimic the real world. I realized with Advent of Code that I spent most of the time figuring out the algorithm, which does not help me getting any further with Clojure.

The meta

Besides working with Clojure, I also watched a few talks by Rich Hickey. I like his ideas and got the feeling he’s one of those people not buying into hype and “best practice” but asking hard questions and thinking for himself. I might not agree with everything he says and don’t agree with every decision in Clojure, but overall he seems very reasonable. That’s more than enough for me to get behind Clojure.

The next update will most likely be on the book I chose to work through and what I learned. And it might only get here after my holiday, so enjoy the holidays, relax and stay tuned :)

+
+ +
+ + + + diff --git a/p/dart-vagrant-and-ides.html b/p/dart-vagrant-and-ides.html new file mode 100644 index 0000000..22af877 --- /dev/null +++ b/p/dart-vagrant-and-ides.html @@ -0,0 +1,80 @@ + + + + + Dart, Vagrant and IDEs; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jan 21, 2016 in Dev

+

Dart, Vagrant and IDEs

+

Using Vagrant in order to have a reproducible development environment is second nature to me. All projects I work on have a Vagrantfile and it usually works great.

Packages and your IDE

One of the biggest problems when running your code inside a virtual machine is the IDE, at least for me. The code lives on my host machine and is synced into the vm, so I use an editor or IDE on my host. I might not have the needed dependencies on my host machine, so the IDE struggles to provide code completion (among other features which only work when the IDE can inspect the libraries used).

PyCharm (and many other JetBrains IDEs) has the ability to use a remote interpreter for Python. Pointing it to the Vagrant machine, PyCharm will ssh into the virtual machine and get the interpreter as well as all libraries installed through pip from there. This works rather well, so even though I don’t have flask installed on my host, I have code completion for the flask API and PyCharm tells me if I am missing an argument. If I use Python 3 for my project and it’s installed in the vm, I don’t need it on my host at all (In fact, I don’t need any Python installed, which is nice if you happen to develop on Windows).

Dart and the packages symlinks

The Dart plugin for the JetBrains IDEs does not have such a feature. It relies on the Dart SDK installed on your host. I wouldn’t mind installing Dart, but there are some problems:

I didn’t find a solution to my first problem, but could solve the other two!

.packages to the rescue

As far as the .pub-cache folder is concerned, there is a third solution beside downloading twice and syncing your pub cache: pub looks up the PUB_CACHE environment variable and if it contains a valid path, it puts the dependencies there. This way, you can store all your dependencies in your project root in a .pub-cache (or whatever you wanna call it) folder. It’s synced to your host automatically, so no need to install the same libraries twice.

The symlinks are a bigger problem, but if you are using Dart 1.12 or higher, there is a way around. By using --no-package-symlinks as an argument to pub get ( or pub upgrade), no symlinks are created. Instead, a single .packages file is used to store all links for the packages needed. Sadly, this links are again absolute. But with a little bit of Dart magic (ok, regex magic), you can change those to be relative and work both on the vm and the host.

If you are using Dart before 1.12, you can of course try to rewrite the symlinks which might work as well!

The rough edges

Because the PUB_CACHE environment variable needs to be set correctly, I only run pub get and pub upgrade inside the vm where my provisioning takes care of putting everything in place. On my host, the PUB_CACHE might point to a different project, messing everything up.

One also needs to remember to run pub get with the correct argument and running the script to replace the links to be relative. If you are using some task runner (like grinder), this is no issue as you have your task and it takes care of doing all the steps needed. If you are still running tasks manually, well, you might want to consider using a task runner.

The state of packages managers

While this was intended as a practical post about Dart in the vm and IDEs, there is an underlying question: Why is it so complicated? Turns out that package management is complicated. Every package manager I have worked with has some problems. NPM (for Node.js) downloads every dependency once for each library that depends on it. There is no sharing going on, even if two libraries in one project depend on the exact same version.

pip (for Python) and gem (for ruby) install packages globally and rely on external solutions (or workarounds) to install dependencies locally for every project. composer (for PHP) installs everything locally but only once, making it the best package manager I worked with in the past. If you work on a lot of projects and all depend on the same set of libraries, there is a certain overhead in downloading those libraries for each project though!

pub seems to take into account all those problems: it installs each version needed of each library globally (if you don’t mess with the PUB_CACHE), which means you can have different projects use different versions of the same library OR use the same library without storing it twice. Of course, within one project you can only use one version of each library, so “dependency hell” is still possible.

The perfect solution?

The best solution would of course be if the Dart plugin could use a remote SDK. I doubt this will come anytime soon (I didn’t see anybody ranting about it anyway). The solution outlined above should also work with any IDE capable of using the .packages file for Dart.

Having a working solution in place makes working on Dart projects which run inside a Vagrant vm much nicer. You should try it!

+
+ +
+ + + + diff --git a/p/deploying-a-jekyll-website-to-github-pages-using-travisci.html b/p/deploying-a-jekyll-website-to-github-pages-using-travisci.html new file mode 100644 index 0000000..4059ef7 --- /dev/null +++ b/p/deploying-a-jekyll-website-to-github-pages-using-travisci.html @@ -0,0 +1,127 @@ + + + + + Deploying a Jekyll website to Github Pages using TravisCI; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jul 25, 2015 in Dev

+

Deploying a Jekyll website to Github Pages using TravisCI

+

Today I switched over this blog to be subject to continuous deployment. Whenever I push a new commit (which might be a design change, new blog post like this or some small change), everything is built and automatically pushed to Github Pages, which hosts this blog.

Let me tell you how I did it!

What?

This blog is served as static pages, meaning only HTML, CSS and Javascript are stored on the servers. There is no application in the background, getting the blog posts out of a database like Wordpress does it. Everything is written in text files which are then used to generate HTML. The software I use is Jekyll. Because it’s only HTML, I can host it almost everywhere and I chose Github Pages because the repository is already on github and it works really great.

There is a way to get around building the static website locally: Github Pages can run Jekyll in the back, generating the pages for you. The drawback is that you can’t use Jekyll plugins, which means it’s limited to the things Jekyll can do out of the box. This is why I build the pages locally and then push the result to Github, letting Github Pages serve the static HTML.

And then there is TravisCI, which is a Continous Integration service “in the cloud”, meaning you don’t have to run anything locally. It executes your tests and can also deploy your code. It’s free for repositories hosted on Github which are public, so it’s a perfect choice for me!

Please note that this is not a beginners tutorial on Jekyll, Github Pages or TravisCI. It’s a description on how to connect those three, so you should either have Jekyll running on Github Pages already or pick the things needed up on the way. I won’t go into detail on that!

Why?

Up until now, I had to run commands locally to build the static pages and then push the stuff to github so it can be served. This meant I had to have Jekyll installed locally, all the right gems in place and needed to remember which commands to execute.

It also meant I couldn’t just clone the repo on some other PC, change some files and be done. I had to either install everything locally or use vagrant with it’s dependencies. That’s way to much overhead for quickly fixing a typo! And I could not use the github web interface to change stuff directly. I could change it, then update my local repository when I get home, build the page and upload it again. What a hassle!

How?

Let’s get started! Let me explain my end result briefly: If I changed something in some file and push the resulting commits, TravisCI generates the whole static page using Jekyll and then pushes these changes to the Github Pages branch (usually gh-pages or master). The whole Jekyll setup lives in a different branch.

For this post, I’ll assume your Github Pages branch is master and the branch the Jekyll setup lives in is source. This is the setup of my blog as well. I am using rake to put together my tasks and my Rakefile looks something like this:

require "jekyll"
+require "tmpdir"
+
+GITHUB_REPONAME = "sgoettschkes/sgoettschkes.github.io"
+GITHUB_REMOTE = "https://#{ENV['GH_TOKEN']}@github.com/#{GITHUB_REPONAME}"
+
+desc "Generate blog files"
+task :generate do
+    Jekyll::Site.new(Jekyll.configuration({
+        "source"      => ".",
+        "destination" => "_site"
+    })).process
+end
+
+desc "Generate and publish blog to gh-pages"
+task :publish => [:generate] do
+    fail "Not on Travis" if "#{ENV['TRAVIS']}" != "true"
+
+    Dir.mktmpdir do |tmp|
+        cp_r "_site/.", tmp
+
+        Dir.chdir tmp
+
+        system "git init"
+        system "git config user.name 'Sebastian Göttschkes'"
+        system "git config user.email 'sebastian.goettschkes@googlemail.com'"
+
+        system "git add ."
+        message = "Site updated at #{Time.now.utc}"
+        system "git commit -m #{message.inspect}"
+        system "git remote add origin #{GITHUB_REMOTE}"
+        system "git push --force origin master"
+    end
+end

What’s going on? Well, we have the generate command which runs a Jekyll build and stores the static website into the _site folder. The publish tasks fails if it isn’t run on TravisCI, creates a temporary dir and copies everything from _site in there. It then makes this temporary dir a github repository, adds all content, commits it and pushes it to github into the master branch. As this would overwrite previous commits, --force is needed. This means the master branch has only one commit at any time. It’s a workaround, but it works pretty well for me. The history is in the source branch, which is all I need.

If you look closely, you’ll see #{ENV['GH_TOKEN']} in the GITHUB_REMOTE variable. It holds the Github token which let you deploy without using a ssh key. To get this variable filled, we need to put it into our .travis.yml. Putting it there in plain text would mean everybody with read access to our repo could push code just like TravisCI does. Which means we need to encrypt it!

But first, let’s look at the .travis.yml file:

language: ruby
+sudo: false
+branches:
+    only:
+        - source
+rvm:
+    - 2.1.5
+install:
+    - gem install --no-rdoc --no-ri bundler jekyll rake
+script:
+    - rake generate
+after_success:
+    - rake publish

This file tells TravisCI to have ruby installed, only build the source branch, use ruby 2.1.5, install bundler, jekyll and rake before running rake generate. If this command returns error code 0, it will run rake publish.

The missing part is the environment variable from above, containing our token. First, install the “travis” gem using gem install travis. Then query the Github API for your token:

curl -u <USERNAME> \
+    -d '{"scopes":["public_repo"],"note":"CI: <REPONAME>"}' \
+    https://api.github.com/authorizations

Don’t forget to replace your username and the reponame! The response should include a key called “token” which holds your token. The last step is encrypting it and adding it to the .travis.yml file. The travis gem can do this for you. From your repository root, run travis encrypt --add 'GH_TOKEN=<TOKEN>'. The gem adds the encrypted value directly to your file!

After enabling the build process on the TravisCI website, push your changes and see if everything works out!

What’s next?

With TravisCI enabled, you could add linters for Markdown, CSS and javascript as well as a javascript test framework if you’d like. Just add the commands to be run to your .travis.yml file and before pushing your updated version, Travis will look if everything still works.

+
+ +
+ + + + diff --git a/p/deploying-to-pythonanywhere-with-travisci.html b/p/deploying-to-pythonanywhere-with-travisci.html new file mode 100644 index 0000000..93a39f4 --- /dev/null +++ b/p/deploying-to-pythonanywhere-with-travisci.html @@ -0,0 +1,80 @@ + + + + + Deploying to PythonAnywhere with TravisCI; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Sep 13, 2015 in Dev

+

Deploying to PythonAnywhere with TravisCI

+

Many of you might already know it: Soon I’ll be joining the blossom Team on their quest to project management awesomeness. The current development stack of blossom consists of Python and Dart running on Google App Engine. In order to get some experience with both Python and Dart, I started a small side project which is a perfect combination of my two main interests, development and music.

The project

The project is a small webpage. To be honest, it could be done with wordpress or tumblr way easier! But developing it manually I can dive into Python and Dart all while working on something useful. The alternative would be to set up the webpage with one of the systems named above and then work on some example code to learn Python and Dart, which doesn’t sound fun.

I created a Flask app which has some routes. It reads some data from csv (no database at this point) and then renders some lists out of this data. I added tests for both the data extraction and the routes as well as pylint to check my codestyle.

To filter a few lists I added a little bit of dart code. It’s compiled to js using dart2js. There is also stylus as a css preprocessor. For testing the Dart code I added PhantomJs to the setup but only use it to check one dart method right now.

Continous Integration

As a first step, I set up Continous Integration with TravisCI. The service gets triggered when I push commits to GitHub, checks them out and runs my tests. It’s pretty good to get feedback if something fails. I set TravisCI up to test both Python 2.7 and PyPy as well as Dart stable and unstable. I wasn’t able to get invoke running with Python 3 which is why I don’t test it.

Getting it running was a bit of a hassle because Dart needs to be installed by hand. There is also the need to install PhantomJS 2.0 because TravisCI only has 1.9 installed.

Adding PythonAnywhere to the setup

I chose PythonAnywhere because I liked the idea of having a PaaS which focuses on Python. I could have gone with Heroku or any other PaaS offering Python support. I didn’t do extensive research so your views might vary, but I had the feeling that for Python specifically, PythonAnywhere looked like the best provider from the outside. And I am pretty happy with it right now.

After reviewing the options to deploy code to PythonAnywhere, I opted for rsync. It can be done over ssh to simplify authentication. After generating a ssh key pair and adding the public key to my PythonAnywhere container, I decrypted the private key file with the travis gem and added the decrypted file together with the public key to the repo.

For the deployment to work, I added a “script” deployment and made a deploy script which encrypts the private key, moves both keys to the ~/.ssh folder, executes the rsync command and runs pip install on PythonAnywhere. The last step for the script is to touch the wsgi file so the web worker is restarted.

In the end, everything was pretty straightforward and easy. It took some trying out a few things as there are no guides out there.

If you wanna dive into the code I am using, have a look at tbalive/website on GitHub. If you need help setting it up, get in touch!

+
+ +
+ + + + diff --git a/p/developing-cllctr.html b/p/developing-cllctr.html new file mode 100644 index 0000000..c2676ef --- /dev/null +++ b/p/developing-cllctr.html @@ -0,0 +1,80 @@ + + + + + Developing cllctr; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jan 18, 2015 in Dev

+

Developing cllctr

+

Over the past two months, I have been building a new weekend project: cllctr. It’s a CD database you can use to store information about your CDs. Say goodbye to excel lists and over-the-top stuff like discogs or collectorz. cllctr is focused on the right amount of data, balancing the time needed to enter new CDs or organize an existing collection.

Currently, cllctr is more or less just a few pages where you can add/edit a CD and view a list of all your CDs. With time, the list of features will grow for sure. I have a ton of them in my head and only need some time ;)

In this blog post, I’d like to focus on the technical details of cllctr. How did I build it, why did I choose to do things the way I did and so on.

On the shoulder of giants

The backend is written in PHP and php-fpm is used together with Nginx to serve the webpages. MongoDB takes care of persisting the data and redis is used to store sessions as well as some calculated data like the amount of CDs somebody has.

Backend matters

symfony2 is my web framework of choice. It is using stuff like monolog for logging or swiftmailer to send out emails, which comes in handy because it’s both very flexible yet easy to handle.

To ensure code quality, a collection of tools like phplint, phploc, security-checker, phpmd, pdepend, phpcpd, php_codesniffer and phpunit are used. phing is the build tool in place, making it easy to run all those tools at once. All those tools can be run by executing the test target with phing. It stops if any of those tools reports any problems. I have even written my own little bash script which counts the amount of “TODO” comments and if it’s over 15, fails. This makes it easy to spot problems in the code. It’s not perfect though as every tool has it’s own settings and something fails because of wrong settings. Some tools report data which cannot be interpreted by the software itself (like phploc or pdepend) but must be checked by a developer.

Phing also takes care of building and updating the whole application. It can update the database to ensure indexes and unique fields, clear the cache and has some targets for development (like adding test data). Everything is designed to be easily run by anybody working on the project. As the only developer it’s hard to say if the goal of “runs without any hassle” is really achieved, but it sure feels like it.

Frontend goodies

To make the webpage responsive, I am using the foundation framework. It comes with jQuery, so this is what I am currently using for the (limit amount of) javascript code. I’ll definitely switch to something more elaborate in the future if the frontend code gets more. Foundation is written in Sass which is what I am using for Css as well.

To compile Sass to Css, combine/minify the javascript code and optimize the pictures, grunt is used. It’s way better for this frontend stuff than Phing is and even if I don’t like having two build tools, it works great. Npm is used to install grunt and its plugins as well as Sass. To install foundation, bower is used. There is currently no good alternative to this way and although this means there are three package managers involved, it works ok. I’d love to see npm being able to install bower packages as well!

Grunt takes the foundation code installed through bower, runs it through sass to produce css and through uglifyjs to produce the js. For image, I am using the imagemin plugin for grunt which uses many different tools to minify images and place them in the public folder of the webapp.

All things DevOps

To develop cllctr, I am using a vagrant vm. Everything is setup so that checking out the code (which lives on GitHub) and running vagrant up is all you need to do to run the application locally (Of course there must be vagrant and virtualbox installed). The provisioning of the vm is done with Ansible, my new favourite orchestration tool.

If new code is pushed to GitHub, it’s automatically checked out by Codeship. The project is build inside a Codeship vm, the phing test target is run and if everything is green, the project is deployed using rsync.

The deploy targets are servers running at DigitalOcean. There are currently two of them, one for staging and one for production. Both host the webserver, application and databases. Codeship deploys the code depending on the branch it lives in. The develop branch is deployed to the staging server and the master branch is deployed to production.

The DigitalOcean droplets are managed by Ansible as well. An Ansible run configures a staging or production server so it’s ready to receive the code to run the application. The Ansible code lives on GitHub as well and is run by Codeship. The way it works is the same as for the application: If new code is pushed to the develop branch of that repository, an Ansible run with the staging server is done. For the master branch the same run is done, only this time for the production server.

To monitor the servers, I have set up Monitority which pings them every now and then. To check the performance, I am using WebPageTest. The domain name and DNS are from NameCheap.

Mindset

When I started with cllctr, my goal was to build something useful. I also wanted to make sure to use a mixture between stuff I know (symfony, Sass + foundation, jQuery, Vagrant) but also dive into new stuff (Ansible to some extend, npm/Grunt/bower, Continuous Deployment/codeship). This currently makes sure I am moving forward but also feeling challenged.

I started by quickly getting something ready which could be used. It wasn’t pretty, it wasn’t well tested, the Phing setup wasn’t really there yet, and deployment wasn’t possible. Then I stopped adding functionality and got all those ready. I made sure tests where run pretty early (The first commit to the cllctr repo was on November 20th, the first test run on Codeship only 4 days later). For every feature I added I also made some other code nice. I wrote down areas I want to improve and followed up on these plans.

I also did things the easiest way possible. Just implement it so it works. Afterwards, refactor it to adhere to coding standards, best practices and so on. One example were my tests. For the functional tests, I had a phing target which set up the database and one that ran phpunit which depended upon that database setup. All tests then used this one database setup. So tests were not really separated and I had to work around some things, but it worked. A week ago or so I refactored it in a way that each functional test sets up a database to use, so each test is really isolated now.

Stuff to improve

Of course I am not done. While adding new features and making cllctr nicer, I also want to improve in some areas. The tests clearly need some work. Some things are not tested at all, some are functional tests because I wasn’t able to mock some symfony internals.

Deployment with rsync works, but is not resilient enough. If rsync has a problem in the middle of a deployment, half the code is from the new version and half the code is from the old version. There are tools out there like capistrano/capifony, fabric and others which can be used to deploy code into production. I’ll definitly have a look at them!

There currently are no backups done on the production server. It’s ok for now as the software is alpha and if the server really dies, than so be it. But for the near future backups should be done and restoring of those backups should be tested. I wrote about this topic and a setup I had for another project which automatically downloaded backups from S3, loaded them locally and ran some tests. I’d like to set something like this up once again because it’s really awesome to know your backups are working at the end of a script run.

I’d also like to get monitoring up and running. Especially log file monitoring is not done at all. I get emailed the errors that are happening, but I feel there are better ways especially to see if e.g. 404s are hit more often than usual. Such errors can go unnoticed for very long!

And of course it would be cool to try new things, like adding SSL (never done that before), using more web servers with load balancing, scaling the database servers. But for this to be justified, there need to be a big user base.

Try it!

If you are collecting CDs, please give cllctr a try. Tell me what you like and what features you want to see. I’d love to chat about it and improve cllctr together with the people using it.

+
+ +
+ + + + diff --git a/p/from-phrozn-to-jekyll.html b/p/from-phrozn-to-jekyll.html new file mode 100644 index 0000000..c2bcc66 --- /dev/null +++ b/p/from-phrozn-to-jekyll.html @@ -0,0 +1,80 @@ + + + + + From Phrozn to Jekyll; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on May 23, 2014 in Blogging

+

From Phrozn to Jekyll

+

As you might know by now, I like static site generators. They combine the flexibility of a templating system with the easy deployment of static pages. Creating pages feels like working in a programming language and deploying the page in the end is as easy as putting some html files on an ftp (which you shouldn’t do, of course). There is no need for a complicated setup or deployment process because it’s just some HTML served from nginx (or any other webserver). You can put the files on Amazon S3, your own host or github pages. And as it’s only static files being served, performance is as good as it gets.

Jekyll and Hyde

I discovered Jekyll a while ago and gave it a test run. It worked very good and I was impressed. As I said, I really liked the HTML output which can be put almost everywhere. There was one major problem for me at least: Deploying to github pages was a pain if you wanted to use plugins, because github runs Jekyll with the safe option, which means that no local plugins are used. This is understandable because plugins would make it possible to run any kind of ruby code on the github servers.

To make it work, the “default” solution back then was to put the Jekyll source files into a branch like source. There you would generate the HTML into the _site directory and have a post-commit hook, which took the changeset of each commit, extract the objects which changed in the _site directory and create a new commit in the master branch with these. It was a little hacky, but it worked most of the time.

I felt, however, that this was a bit complicated and made collaboration harder than needed. First of all, the source branch had both the Jekyll source files as well as the generated HTML in source control. I’m not a huge fan of this because it means you can get in pretty weird situations where you change a source file but not run Jekyll, meaning the HTML stays the same. It also makes it easy for collaborators to change the HTML directly, commit it and have it overwritten the next time somebody else uses Jekyll to generate the HTML.

The hack itself felt wrong as well. First and foremost, not having the post commit hook in place messes everything up. Pull requests are impossible to manage. A change in the source branch which doesn’t change the output (settings or changes in a draft/unpublished page) creates an empty commit in the master branch. All this can be managed somehow, but it seems like too much hassle.

Enter Phrozn

Looking for a static site generator for ViennaPHP, it made sense to look in the PHP world. It’s a PHP usergroup after all, so using something from the PHP ecosystem looked like the right choice. I found Phrozn and started to use it soon after.

The one thing I loved immediately was that Phrozn stores the source files in a .phrozn directory and puts the generated HTML in the root directory, which means you don’t need any workarounds for github pages. Just commit, push to the github repo and you’re done. Awesome!

The most basic tasks worked well with Phrozn. Make a layout, make a static page, add some CSS with Sass and some javascript. Very cool! Problems started when I tried realizing this blog. The documentation is mostly non-existent or outdated. Generating an index page with the last posts, which is easy in Jekyll, seemed undoable.

Looking for solutions I discovered that the last commit to the Phrozn github repository is 8 months old and there are issues unsolved for a year and more. The documentation on plugins is outdated for over 2 years as far as I understand.

Don’t get me wrong: This is open source and I am not complaining that somebody decided to use his spare time on something else than the project I need right now. I fully understand that. But I won’t use my time to learn a tool which is not actively developed anymore. I also don’t have time to develop it myself. And that’s why I decided to abandon Phrozn and migrate to something else.

Back to square one

I looked around once more and stumbled upon StaticGen, a page listing many static site generators. Jekyll is leading the list in any way possible. It is the most popular one, has a great documentation and many supporters. There is a good plugin system and it just works.

Everything I liked about Jekyll the first time came back when I took it for a test drive to see what changed. The only thing bugging me back then was the deployment to github pages, so I tried to find a solution that does not suck as much. I found a blog post by @zapparov showing a Rakefile which publishes a Jekyll page to github.

The idea is to make a temporary directory, copy the _site dir there, initialize a git repository, commit and then do a git push --force to the master or gh-pages branch. At first, that seemed strange. But the more I think about it, the more I like it. Ignoring the _site directory with git means the source branch only contains the Jekyll source files. The master branch has no history and only a generic commit message, but then again it is only there for github pages. The history can be seen in the source branch. Contributors can do a pull request against this branch. It can be merged locally and is put into production by executing the rake task. It’s also pretty clear that in order to change the page, you need to modify the Jekyll source.

The idea to use a Rakefile has some additional benefits. It got me thinking what else I can automate. I got an idea to send an update.xml to Google every time something on the page changes using a Rakefile. Maybe there is a plugin - if not, I might write one.

Lessons learned

The most troubling thing for me was to discover that Phrozn isn’t developed anymore. I’ll make sure to check the state of development and if there is a community behind the project if it looks like becoming a big part of my setup. It also showed me once more that no software does 100% what you need. There is (almost) always a trade-off - Choose wisely.

+
+ +
+ + + + diff --git a/p/google-app-engine-remote-api-within-ipython-notebooks.html b/p/google-app-engine-remote-api-within-ipython-notebooks.html new file mode 100644 index 0000000..3009df5 --- /dev/null +++ b/p/google-app-engine-remote-api-within-ipython-notebooks.html @@ -0,0 +1,86 @@ + + + + + Google App Engine Remote API within iPython Notebooks; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Dec 11, 2015 in Dev

+

Google App Engine Remote API within iPython Notebooks

+

At blossom, we are running on Google App Engine (GAE). It’s nice to stand on the shoulder of giants and don’t have to worry about servers at all. GAE takes care of scaling up and down for us, handles our database as well as storage and gives us great insight into our production environment.

One thing I don’t like about GAE is that in order to have a look at the data inside our production database, you’ll either use the web interface or a REPL (which is called remote_api_shell). The web interface just doesn’t cut it if you want to look at different tables, compare data and do things like “Give me all projects and tasks for this set of users”. With the remote_api_shell, you can do that but it’s fairly limited in terms of repeating things and sharing your results.

Python in your browser

For learning Python and testing solutions, I am usually running an iPython kernel within Jupyter. If you don’t know about the Jupyter project yet, go check it out! It’s a server running on your localhost which serves a web frontend where you can create so-called notebooks and write code in various languages (depending which Jupyter kernels you have installed) right inside your browser. The code is executed through the server on your localhost, so it’s the real deal and not some Javascript library with an incomplete feature-set.

With a Jupyter server running and the iPython kernel installed, I can set up new notebooks to play around with e.g. RegEx. It’s also fairly easy to do some tutorial or see how, for example, generators work. Notebooks get stored on my machine, meaning I can pick up where I left whenever I want. Changing code, re-executing it and copying it is also super easy. And if I want to share the notebook, I can export it to HTML, markdown and even pdf.

Combining the GAE Remote API with iPython notebooks

It became clear to me pretty fast that by using an iPython notebook to connect to the GAE Remote API and then working with objects like I do in the code could make things much easier. Sadly, I wasn’t able to pull it off as I hit more than one wall trying to get the environment set up the right way. With some luck, I found a post by Andrey where he stated that he got this setup working. I shot him an email and he was very helpful and we got two different solutions working within a few emails.

It works in a pretty straightforward way: When the iPython kernel boots, it looks for files to execute in a specific startup folder at ~/.ipython/profile_default/startup. Putting a startup.py file there which provides a function to connect to the GAE Remote API is the only thing needed:

This makes it possible that within a notebook, you call the function connect_to_gae() and you are able to work with your objects or GQL just as you would inside your project:

connect_to_gae()
+
+from models import User
+User.query().get()
+
+from google.appengine.ext import ndb
+ndb.gql("SELECT * FROM Project where __key__ = KEY('Project', 'somekey')").fetch()

For details about the implementation, have a look at the GIST. It’s pretty weird in the way that you have to add and remove stuff from the sys.path before being able to actually call the functions that establish the connection. By wrapping all this into a function, I make sure it’s only invoked when needed. Other notebooks that are not intended to work with the GAE Remote API won’t work any different!

The startup.py file for blossom has a few additional lines which include our project in the path so I can import our models and work with them easily. You can also add common imports there or provide helper functions you need a lot in your notebooks.

With this setup, I was able to do some analysis on our database, got it formatted nicely and shared the HTML with my co-workers with ease. And by providing them the notebook itself, they can modify my work and put their own on top, making collaboration totally easy!

+
+ +
+ + + + diff --git a/p/hello-world.html b/p/hello-world.html new file mode 100644 index 0000000..e141cad --- /dev/null +++ b/p/hello-world.html @@ -0,0 +1,80 @@ + + + + + Hello World!; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on May 16, 2014 in General

+

Hello World!

+

I wanted to do this for a long time. Write a “Hello World” on a new blog. So here it is. Awesome! But this is not a normal “Hello world” trying to get my feet wet, this is a post on why one should avoid blogging platforms like Blogspot, where my blog lived up until now.

Quick wins

When I started blogging on Blogger, I did it out of convenience. Nothing to set up, a little configuration and it just worked. Integration with Google Services worked, I could set up IFTTT to send a tweet when a new blog post was released and it was running on stable Google Servers.

Of course there were issues. I tried to integrate Google AdSense, but I couldn’t sign up (and can’t today). The form within Blogger for signing up your blog just doesn’t work. The rich text editor is a mess and I rant everytime I try to work with it. The traffic shown in Blogger is off by 100% sometimes from what Google Analytics reports. The theming works, but is complex and some stuff just doesn’t work.

The long tail

Fed up with the restrictions and mostly because I enjoyed working with Ghost so much, I realized I need to quit Blogger. And that’s where the problems begin. Exporting of old blog posts might be possible, but it’s a mess. As I used the domain Blogger gave me, I cannot redirect users, which sucks.

This really made me think. How am I going to proceed? Shall I just continue using Blogger? How am I moving users from my old blog to my new one if I set up something new? Will I be able to move Google Traffic from my old blog onto my new one?

All these questions wouldn’t be an issue if I had made a different decision back when starting my blog. Setting up a custom domain is possible with Blogger and I would be able to change the DNS and run any other blog software I like or any other service. I could set up custom redirects for blog posts and move all the traffic away from Blogger.

Thinking about freedom

This experience made me think about freedom. Some services don’t lock you in by design, like Dropbox. Uninstalling the dropbox service on your pc leaves your files where they are now and syncing stops - You can take your business elsewhere. On the other hand, if your workflows are designed around Dropbox, moving away gets harder. That’s not an issue with Dropbox. They enable you to do certain things which in turn may make it hard to quit Dropbox.

It’s the same with Blogger. They are offering a free service and nobody forces me to start or continue using it. I was the one putting blog posts on there and driving traffic to the page. I was the one going with the free domain. I could easily rant about how awful they are for making exports of blog posts that hard or for not being able to reroute my subdomain. But I could have checked this when I signed up. This is nothing one can expect from a free service.

Fool me once…

I made that mistake and I learned from it. I am running my own blog now. It’s using a static site generator named Phrozn, which creates HTML from markdown files. The page is hosted on github pages. I am using my own domain sgoettschkes.me. This makes it easy for me to move away from github or Phrozn anytime I want. It also lets me control the whole setup. And, writing in Markdown format is just fun!

+
+ +
+ + + + diff --git a/p/idempotent-version-updates-with-ansible.html b/p/idempotent-version-updates-with-ansible.html new file mode 100644 index 0000000..f163f6a --- /dev/null +++ b/p/idempotent-version-updates-with-ansible.html @@ -0,0 +1,94 @@ + + + + + Idempotent version updates with Ansible; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Feb 29, 2016 in Dev

+

Idempotent version updates with Ansible

+

If you are a seasoned Vagrant user, you know the problem around provisioning. If some software version changes, everybody needs to provision his or her machine again. Otherwise, things will fail eventually. You’ll also run into problems if running the provisioning twice fails for some reason (e.g. because a file already exists somewhere).

Idempotent setup

If you are setting up your provisioning, the first thing you need to make sure is that you can run the setup steps more than once and it works without manual steps in between. Nothing is more frustrating than running vagrant provision and being left with errors because something is already installed or a file is already at a specific place.

This is easy to do, even with a basic shell provisioner. You can check for the existence of files and test if some software is already installed. It’s also pretty easy to test: Just run the provisioning again and see if it works. If not, add appropriate tests and not run the offending commands.

Updating vs. optimizing

Getting the provisioning step to update software to the correct version is more tricky. It’s easy with package managers like composer for PHP or pip for Python where you have a file containing all dependencies and their versions and the package manager takes care of the rest. If you need to download, compile and install a library, you are faced with three very different options: You can do all the steps every time when provisioning, which takes time even though most of the time nothing changed. You can also install it once and then forget about it, which means nothing happens if a new version for a software is available. The third thing is figuring out the current version as well as the target version and only run the steps for installing it if they don’t match.

With Ansible, the Vagrant provisioner of choice for me, all three ways are possible. It’s important to be clear which one you are choosing so you know what drawbacks your solution might have.

To run some steps every time, give them no restriction like creates for commands and Ansible will do as told. If you are using the command or shell module, Ansible will run it every time. If your process is downloading some tar or zip and extracting it, maybe running a setup command as well, Ansible will download it every time and you’ll get new versions available under the url.

If you wanna run the whole process only once, use creates or similar instructions. Ansible will check if the file or directory is there and if it is, skip the step. As an example, let’s say you download some tar and extract it somewhere. The unarchive module takes a parameter called creates. If you set it to the extracted path, the command will do nothing if that folder is already there. As Ansible can’t possibly figure out if the version inside the tar is the same as the existing one, it won’t unarchive it even though the version changed.

Conditionals

If you want to run a command or a set of commands only if the installed version of a software differs from the target version, things are getting interesting. Let me walk you through that scenario. As an example, I’ll use the Dart SDK.

First, we need a command to get the current version installed. For Dart, that’s reading the version file which contains only the version string. Other tools might make this more complicated, e.g. the Google Cloud SDK, which prints a lot of information in addition to the version when running gcloud --version. I usually use some command line magic like sed or cut to extract the part I need. Using the shell module from ansible, I pipe them together to end up with the version being the stdout for that command. We need to use register to put the output of that task into a variable. The whole task could look like this:

- name: Read the dart version
+  shell: cat /usr/local/lib/dart-sdk/version
+  register: current_dart_version
+  ignore_errors: True
+  changed_when: dart_version != current_dart_version.stdout

As you can see, I ignore errors because the shell command might fail if dart is not installed. I use changed_when because I like a clean output from Ansible. You can also see a variable named dart_version which I did not mention yet. It’s simply a variable keeping the target dart version.

Next, let us download the Dart SDK if needed:

- name: download dart sdk
+  get_url:
+    dest=/tmp/dartsdk.zip
+    force=yes
+    url=https://storage.googleapis.com/dart-archive/channels/stable/release/{{ dart_version }}/sdk/dartsdk-linux-x64-release.zip
+  when: dart_version != current_dart_version.stdout

Again, we use the dart_version as a target both in the url and for the when clause which compares the stdout from the above command with our target. The last step is to extract the zip:

- name: extract dart sdk
+  unarchive:
+    copy=no
+    dest=/usr/local/lib
+    src=/tmp/dartsdk.zip
+  when: dart_version != current_dart_version.stdout

The same pattern repeats here as well. We only run the command if the target version is different from the current one. You can use this pattern for all kinds of installation and steps performed after the installation is done.

Getting fail-safe

Even though this seems very much all you can want from a provisioning step, if you have to take care of real servers in production, you might want to be even more cautious. In the example above, if some files are not present in the new SDK, they are not deleted as the extract command does not take care of this. We could work around this by first deleting the SDK.

In production, it might be valuable to not just replace a version but have two versions installed and switch a symlink or something to change over. This way, it’s not possible that a process has access to the software in an unpredictable state.

One tip regarding the version extraction: There are different ways to read the version! You might be able to read the version from some version-file or run the command with a COMMAND --version parameter. Look for different ways and see if some outputs only the exact version. And if you have to, using some regex with sed might not be the cleanest way but it does the job.

+
+ +
+ + + + diff --git a/p/jekyll-update-your-sitemap-automatically-with-rake.html b/p/jekyll-update-your-sitemap-automatically-with-rake.html new file mode 100644 index 0000000..e6274b2 --- /dev/null +++ b/p/jekyll-update-your-sitemap-automatically-with-rake.html @@ -0,0 +1,115 @@ + + + + + Jekyll: Update your sitemap automatically with rake; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jun 28, 2014 in Dev

+

Jekyll: Update your sitemap automatically with rake

+

If you write your blog to also be found through Google, you may have a sitemap which makes it easy for Google and Bing to crawl your page. It might be a good idea to inform both if this sitemap changes so they can send their crawlers your way and update their index with the great stuff you just put on there. This can be done by doing a GET request and passing the url to your sitemap as a parameter. Easy as pie, right?

But my page is static

If you are running a wordpress blog or some other dynamic blogging engine, there is a chance that sending this request is either built in or available as a plugin. If not, you can always develop the parts yourself. But what if you are using Jekyll or another static blogging engine which doesn’t execute any code on the server? Well, you can push the html to the webserver and then do a ping to google from your own laptop. That works with curl or wget or whatever you like.

To prevent you from forgetting to run the request and to always remember what the syntax is, you can always write a rake task which publishes your page and then pings Google and Bing. Let’s do that!

Start with something easy

A short disclaimer before we begin: I copied most of the code from a blog post I cannot seem to find anymore and used it, so it’s not my property. If you know who actually wrote this code, please tell me so I can give credit!

First, let’s tell Jekyll to generate the static files we need:

desc "Generate blog files"
+task :generate do
+    Jekyll::Site.new(Jekyll.configuration({
+        "source"      => ".",
+        "destination" => "_site"
+    })).process
+end

That was easy. Running rake generate will now do the same as jekyll build but we can reuse it when we push our code to github (or any other git repository we have write access to). I am using github pages as an example where you put the HTML code into the master branch while the source for Jekyll lies within the source branch. So rake is creating a new repo within the _site directory, adding and committing all files and then doing a push force to github:

desc "Generate and publish blog to gh-pages"
+task :publish => [:generate] do
+    Dir.mktmpdir do |tmp|
+        cp_r "_site/.", tmp
+
+        pwd = Dir.pwd
+        Dir.chdir tmp
+
+        system "git init"
+        system "git add ."
+        message = "Site updated at #{Time.now.utc}"
+        system "git commit -m #{message.inspect}"
+        system "git remote add origin git@github.com:#{GITHUB_REPONAME}.git"
+        system "git push origin master --force"
+
+        Dir.chdir pwd
+    end
+end

You can easily exchange that part for “just” a push to a remote repository or an rsync call to push the files to a remote location.

Add some magic

And now let’s add a rake task to ping Google and Bing about our new sitemap:

desc "Push sitemap to Google and Bing"
+task :ping do
+    urls = [
+        "http://www.google.com/webmasters/tools/ping?sitemap=#{SITEMAP_PATH}",
+        "http://www.bing.com/webmaster/ping.aspx?siteMap=#{SITEMAP_PATH}"]
+    urls.each do |url|
+        uri = URI.parse(url)
+        req = Net::HTTP::Get.new(uri.to_s)
+        res = Net::HTTP.start(uri.host, uri.port) { |http|
+            http.request(req)
+        }
+    end
+end

The last part is calling the ping rake task from within the generate task which can be done with Rake::Task["ping"].invoke. Now, whenever calling rake publish, it generates the html, pushes it to github and pings Google and Bing. To see the end result, check out my Rakefile.

Success!

+
+ +
+ + + + diff --git a/p/join-us-at-viennaphp.html b/p/join-us-at-viennaphp.html new file mode 100644 index 0000000..7c54775 --- /dev/null +++ b/p/join-us-at-viennaphp.html @@ -0,0 +1,80 @@ + + + + + Join us at ViennaPHP; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Aug 01, 2014 in Community

+

Join us at ViennaPHP

+

If you are a PHP developer who lives in Vienna, you might have heard of ViennaPHP. It’s a local usergroup organized by Stefan Hupe and myself to bring together the PHP community.

What we do

As you can see on Meetup, we are organizing a monthly gathering of software developers where we have talks (usually 2-3) by members of the community and well-known speakers from within the community and sometimes even from other countries (like David Mytton from ServerDensity or Freerich Bäthge from Sensiolabs Germany). We start with snacks, then have one or two full talks and a mini-talk with breaks to socialize. Afterwards we stay at the location a little longer to discuss the topics just presented or other interesting subjects. It’s not unusual that the last people leave well after 11 p.m.

Thanks to our sponsors, among whom are many local companies as well as Jetbrains, we can offer free drinks and pizza to everyone. So attendees of our local gatherings usually discuss various topics with a slice of pizza in the hand and argue about the talks afterwards while enjoying a beer (or other beverage). The Stockwerk Co-Working space is letting us use their third floor without pay and organize the drinks and the pizza, which is awesome! One of the reasons ViennaPHP is this great is because of our sponsors.

Why you should join us

If you are in Vienna, you should definitely join us. Head over to meetup and become a member of our group. You’ll get information about upcoming events and can get in touch with all the other members. But being on meetup is only 10%. The more important thing is showing up to a gathering! Learning about new topics, discussing with other people and having a good time is done in person, not virtually.

We are also doing many special meetups: We had meetups together with other usergroups to start exchange between all those different framework groups, the ViennaDB database group or the web performance UG. We did a Christmas special where everyone brought something and we enjoyed ourselves instead of listening to talks! We are having an open air meetup next week! And there is much more to come. Our goal is to provide useful information with talks and discussion but also try new things and have a good time doing unusual things.

Help wanted!

If you enjoy the meetup and wanna help, shoot me an email! We are currently two people organizing ViennaPHP and Stefan is pretty busy right now. I could use someone helping me with basic things like scheduling speakers and looking for new ones, organizing sponsors or doing the “PR” (emails, twitter).

See you on 05.08

Of course I hope to see you next Tuesday, 05.08, for our Open Air meetup. It’ll be awesome!

+
+ +
+ + + + diff --git a/p/just-ask.html b/p/just-ask.html new file mode 100644 index 0000000..bd9b2b0 --- /dev/null +++ b/p/just-ask.html @@ -0,0 +1,80 @@ + + + + + Just ask!; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Sep 04, 2015 in Community

+

Just ask!

+

What did you do last time you had a problem? The last time you couldn’t figure something out? Some people quit. Others try to force a solution on their own, applying insane amounts of time towards a problem. Often though, asking questions to the right people is the way to go.

Advice and help are one email away

Many people don’t realize that they are not alone out there. Whether it’s a tricky programming problem, advice on which option to choose or even asking for help in situations where one can’t do it alone.

When I started diving into Python, I shot an email to a few people whom I knew worked with Python and which also worked on web platforms. I asked them for advice on how to start, good resources on my way and so on. I got a ton of feedback and some email threads are still going with me shooting specific questions their way.

Just yesterday I emailed a few people I know are freelancing or have freelanced in the past with some specific questions. I already got good feedback and I am sure there is more to come.

“Networking”

The key, of course, is knowing whom to ask. This depends highly on what your questions are. The better you know people, the more likely you are to get a response of course. So building a network of people is a good idea. And by network I don’t mean attending “Networking events”, hand out business cards and talk about how awesome you are. I mean going to meetups, talking to people, providing advice and help. People who are known to help the community will get help themselves far more often than unknown persons (for obvious reasons).

It’s also important to build a broad network as you don’t know where your road is heading. If you are a developer, don’t only talk to developers who are working in your language. Get to know other languages and connect to people there as well. And don’t only talk to developers either. Try to find sysadmins, project managers, marketing, “business people”. Talk to people from big corporations, small businesses, startups. And then get out of your profession at all and try to find artists, athletes all the way to zookeepers (you get the idea).

How to ask

You got your network and you got a question or came up with a problem. Now you need to ask. My only advice here is: Be respectful! Not only in how you write the email, but also in whom you write. Ask yourself really hard if this person is the right person. You are wasting somebody else’s time, so you should make sure he is the right person. Don’t just shoot an email to 300 people!

I am also writing each email by hand and don’t copy/paste if I write two persons for the same reason. The emails end up looking somewhat the same, but there is always a small personal twist you can add which, in my opinion, makes a huge difference.

Emails should be short and to the point, as people don’t have time to read 2 pages of prose. This means thinking about your question in the first place, trying to answer it yourself. Don’t be that guy that asks everybody for every minor thing! Also try to think if StackOverflow, a Google Group or IRC chat are better suited for your question. Sometimes a personal email is not the right thing.

Cold calling

The last thing I’d like to write about are emails to people you do not yet know. There is nothing wrong with those as long as you stick to the principles above and are ok with not getting a reply. Again, people are busy and they might have other things on their mind.

And of course, if you get an email asking for advice or help, try to answer it!

+
+ +
+ + + + diff --git a/p/my-first-few-days-with-jake.html b/p/my-first-few-days-with-jake.html new file mode 100644 index 0000000..e411798 --- /dev/null +++ b/p/my-first-few-days-with-jake.html @@ -0,0 +1,80 @@ + + + + + My first few days with Jake; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jul 30, 2015 in Health

+

My first few days with Jake

+

As announced on twitter, last week I ordered a batch of Jake. It arrived on Wednesday and I had to try it out the same evening. I’d like to share some thoughts about it and reactions I received. It’s only been a few days, so this is not some extensive review of the product or anything.

Jake?

Jake is a “meal replacement product”, meaning it has all the ingredients your body needs in the right amount, making it suitable to replace entire meals. It can be compared to Soylent. It’s not a supplement, so it does not contain only some nutrients and minerals, but all your body needs (according to the European Food Safety Authority).

Ordering, Shipping and Preparing

Ordering through the webshop was straightforward. The order arrived 5 days later with a weekend in between, which is totally ok. There were no shipping costs, which is refreshing. Not wondering how much it’ll cost in the end makes it way easier to order (at least for me).

The package contained a shaker and 20 Jake Sport “meals”. Preparing is also pretty easy: 400 ml water, add the powder, shake well, drink. It’s basically a one-minute thing.

“Eating” Jake

Jake comes with Vanilla taste, which is ok for me. I’m not a big fan of Vanilla but I don’t really bother. Jake tastes ok. It’s a little thick which I guess it has to be in order to fill the stomach to a certain degree. Other than that, there is really nothing to say.

After eating Jake, I certainly feel like I had a full meal. Not the “I’m to full to eat anything”, but the “that was a nice meal” full. I don’t feel hungry anymore and don’t have any side effects.

Yesterday, I replaced dinner with Jake. I was on the road and it came in handy. I noticed that a few hours after my “dinner”, I felt a bit hungry but not like I hadn’t had anything to eat (low energy) but rather “I could eat something because my stomach is not full anymore”. It didn’t bother me though, because this happens to me from time to time and I’m used to it.

Reactions

I was really surprised by the reactions towards me trying Jake. I had (and have) no intention of replacing my entire diet with Jake and bought it to try it and have something when not at home/no time to prepare a meal.

The strongest reactions were from my girlfriend and co-workers who were not able to understand why anybody would do this to begin with. They love food and replacing a meal with Jake seems absurd to them. I understand where they come from but as somebody who sees eating as something that needs to be done, I don’t share their point of view.

I also got some people interested, either because they had never heard about Jake (or Soylent) or because they had thought about trying it themselves and were curious as to what I had to say about it.

Next steps

I’ll use the batch I ordered and then re-evaluate. Based on the first days, I’ll definitely order more. My plan of only replacing certain meals infrequently still stands, now even more so as I cannot see myself relying on Jake alone for a day. I’d definitely miss something.

+
+ +
+ + + + diff --git a/p/my-personal-state-of-clojure.html b/p/my-personal-state-of-clojure.html new file mode 100644 index 0000000..0ffaf1a --- /dev/null +++ b/p/my-personal-state-of-clojure.html @@ -0,0 +1,80 @@ + + + + + My personal state of Clojure; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Oct 23, 2017 in Dev

+

My personal state of Clojure

+

Almost a year ago I wrote my last blog post on this blog. It wasn’t planned, it just happened. I was busy doing other, unrelated things and while I sometimes had something to say, I never took the time to take it to my blog. As with many things which are not top priority, it got lost along the way.

As a comeback, I figured a personal look into Clojure might be interesting for some. Let’s go!

One year with Clojure

We started using Clojure for a new project (green-field, as some might say) at Blossom. We are building the project alongside running Blossom and other projects, which means progress is naturally slow. We also decided to look into Datomic as well, harming progress even further. But while we removed Datomic from our stack again, we keep Clojure and Clojurescript. Getting used to Clojure and Clojurescript took some time as well but now I feel good about the way I write code and I grew to like it a lot. I still got much to learn, but that’s ok as I like learning new things. Beside work, I used Clojure for 3 side projects (a Chatbot, an article-based website and a map-based web app).

Clojure as a language

I have worked with many languages over the years. Clojure feels more mature than many other “small” languages, both the language itself as well as the ecosystem. Leiningen is a great tool for managing dependencies and for building your project. Being built on top of the JVM means that running the application in production is possible everywhere you’d be able to run Java applications.

After some time to get used to the Clojure way, I am very fast implementing features in Clojure. Having the codebase split up into many small functions which are pure in most cases makes it easy to plug them together in new ways, remove a certain step or add another one. Testing is easy as well, given that Clojure works with data most of the time.

And, before the argument comes up: Sure, just having (pure) functions doesn’t protect you from a messy code base. You can have unreadable, untraceable code in almost any language. But my personal feeling is that by sticking to small functions and trying to keep them pure goes a long way towards good software.

The Clojars repository contains many packages and I haven’t had any problems finding the libraries I needed throughout the year. Having the possibility to add Maven dependencies or Jar files if no Clojure library is available adds to the ecosystem as well. Using Java libraries isn’t as smooth as using the ones written in Clojure, but that’s mostly due to the underlying differences in thinking rather than a shortcoming of Clojure. And even though it sometimes feels weird, it works quite well.

The community

Clojure has a small, nice community. You can feel the excitement about the language at our local Usergroup as well as when talking about Clojure at conferences and with fellow developers. Many times I had people jumping in to help me with my problems and being nice. I also don’t feel like I am looked down on or anything if I don’t understand a certain concept. In languages like Haskell I always felt a little stupid for not knowing how to solve a certain problem “the functional way”. I didn’t have this feeling in Clojure.

As with any other small language, having a small community has drawbacks. Sometimes, you come across a problem nobody ever had. This is rare in the big languages as almost everything has been done. Sometimes, you are looking around and while it seems others have solved the problem, they didn’t openly discuss it. Again, it happens because of the smaller amount of people around.

Additionally, the opinion of a few does make a lot more noise in a small community. It did show recently as a negative blog post hit Hackernews and reddit and the community started discussing. I don’t think a post like this would hit the Javascript or Python community as hard. With this in mind, it becomes even more crucial to try to discuss in a civil manner than to attack. People are spending their free time so we all can benefit. Even if you don’t like or don’t agree, showing some decency and respect for the work goes a long way.

Getting paid

Beside Blossom, I do some freelancing and while I’d love to use Clojure there as well, I wasn’t able to find any opportunities yet. The market for Clojure developers seems small. I was surprised to see some Clojure job posts on Upwork and have applied there, without much success yet.

In Vienna, there are some companies using Clojure for their products. I am excited for their success stories and the inspiration for other companies to make the bold move and invest in a rather small language. I feel that with the Java interoperability the risks of such a move are minimized.

Should you drop everything else and learn Clojure? I don’t think so. Make sure to experiment with Clojure. If you like it and if you feel you could be more productive, try to incorporate it into existing projects or new ones. But don’t just try Clojure. See if Erlang/Elixir is for you, if you can get more done with another “big” language or if you are still happy with your language of choice.

I don’t see Clojure as a game changer, just as a nicer, cleaner way to express intent. But that’s a lot of what a language should foster, right?

+
+ +
+ + + + diff --git a/p/phantomjs-2-on-wheezy-and-travisci.html b/p/phantomjs-2-on-wheezy-and-travisci.html new file mode 100644 index 0000000..28748c2 --- /dev/null +++ b/p/phantomjs-2-on-wheezy-and-travisci.html @@ -0,0 +1,83 @@ + + + + + PhantomJS 2 on Wheezy and TravisCI; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Aug 21, 2015 in Dev

+

PhantomJS 2 on Wheezy and TravisCI

+

After playing with Dart for a little while, I also looked into testing with Dart and learned that the test library can use PhantomJS to test the code that interacts with the DOM. Being a testing junkie, I wanted to give this a try but learned that I needed PhantomJS 2 to get it working. Beside running it in my VM, I also wanted to have it running on TravisCI. This blog post tells you how to set up both.

Compiling it yourself

I struggled to find a binary version of PhantomJS 2. This version was released in January, but there are problems regarding building the binaries to work on different systems. The contributors are working on a solution. Until then, you have to build PhantomJS 2 yourself.

There is a script for this and the build steps seem rather easy. I ran into problems doing this on my Debian Wheezy VM because of not enough RAM. So I spun up another one, gave it 2 GB of RAM and two processors and ran the script with --jobs 2 which seemed to do the trick.

After 2 hours of obscure output, a shiny executable named phantomjs was there. I put it up to download so everybody running Debian Wheezy can use it. Due to static linking it might not work, but give it a try:

sudo apt-get install -y libicu52 libfontconfig1 libjpeg62 libpng12-0
+sudo wget -O /usr/bin/phantomjs https://copy.com/YqL4Uc9T0PERoApf
+sudo chmod +x /usr/bin/phantomjs

Do it on TravisCI

After running the tests locally, you might wanna run them on your CI server as well. If you are using TravisCI, you might be surprised. Even though TravisCI has PhantomJS preinstalled, it’s the 1.9.8 version.

Put these commands into your install or pre_install section to get PhantomJS working:

curl -s https://packagecloud.io/install/repositories/armando/phantomjs/script.deb.sh | sudo bash
+sudo apt-get install phantomjs=2.0.0

Now the only problem is that the old PhantomJS is in the PATH before the /usr/bin. This means that phantomjs points to the old 1.9.8 version. My workaround for this is running the following command when running the Dart tests: export PATH=/usr/bin:$PATH && pub run test -p phantomjs test/test_app.dart. Maybe there is a nicer way. I thought about removing the path to the old phantomjs executable from the PATH but could not find any easy way to do this. I also tried finding any settings for the dart test runner to tell it where to find the PhantomJS executable, but wasn’t successful.

With PhantomJS running locally and on TravisCI, you can go ahead and write tests for your Dart code. Happy testing!

+
+ +
+ + + + diff --git a/p/phoenix-testing-with-cypress.html b/p/phoenix-testing-with-cypress.html new file mode 100644 index 0000000..b4d9c44 --- /dev/null +++ b/p/phoenix-testing-with-cypress.html @@ -0,0 +1,132 @@ + + + + + Phoenix UI testing with Cypress, Part 1; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Feb 03, 2021 in Dev

+

Phoenix UI testing with Cypress, Part 1

+

I still remember the days I tried to achieve UI testing with Selenium, PhantomJS and various other tools. It was a hassle. It didn’t run on CI because it needed some kind of window manager. It was unstable.

Introducing Cypress

Cypress has solved these issues, hiding the complexity of UI testing and leaving you with the task of writing the tests. It comes with its own UI where you can run tests and see what the test does in realtime. Cypress can do screenshots, videos of the tests and more.

In this tutorial, we’ll mostly use cypress run. This command acts like mix test does: Running your tests and displaying the result within your terminal window. I encourage you to check out the other features of Cypress on your own.

Cypress Setup

I assume you have an existing Elixir/Phoenix project you want to start using Cypress with. Your frontend files are located at assets/ while your elixir tests are stored within test/.

To install Cypress, we use npm: cd assets/ && npm install cypress --save-dev. Cypress installs itself and is afterwards available as a command line tool at assets/node_modules/cypress/bin/cypress.

Cypress by default expects all test files and support files to be located at cypress/. I’d argue a much better place for these files is in test/cypress, so this is where we’re going to place them. If you like your Cypress tests to live someplace else, you’ll find this guide helpful to figure out which adjustments you need to make.

Config and support files

Let’s start with the config file cypress.json in your root directory:

{
+  "componentFolder": false,
+  "downloadsFolder": "tmp/cypress/downloads",
+  "fixturesFolder": "test/cypress/fixtures",
+  "integrationFolder": "test/cypress/integration",
+  "pluginsFile": false,
+  "screenshotOnRunFailure": false,
+  "screenshotsFolder": "tmp/cypress/screenshots",
+  "supportFile": false,
+  "testFiles": "**/*.*",
+  "video": false,
+  "videosFolder": "tmp/cypress/videos"
+}

As you can see, we overwrite all folders, either pointing to test/cypress or tmp/cypress (for files to be ignored). We also don’t use support files or plugins and deactivate screenshots and videos.

The first test

Now it’s time to write the first test, a simple request to our homepage. Tests for Cypress are placed in the integrations folder which means creating the file test/cypress/integration/index_spec.js:

describe('Homepage', () => {
+  it('Visit homepage without interaction', () => {
+    cy.visit('http://localhost:4000/')
+  })
+})

You can run this test using the command ./assets/node_modules/cypress/bin/cypress run but it will fail if your Phoenix server does not run. Try it again after starting the server with mix phx.server in another terminal window.

The All-in-one shell file

We want to run the tests with one command, both locally and on a CI server. I used the shell script suggested by https://www.alanvardy.com/post/phoenix-cypress-tests and modified them a bit. Create a file cypress-run.sh, make it executable (chmod +x cypress-run.sh) and put the following code into it:

 #!/bin/sh
+
+MIX_ENV=cypress mix ecto.reset
+echo "===STARTING PHX SERVER==="
+echo "===IF STARTING CYPRESS FAILS==="
+echo "===RUN npm install cypress --save-dev ==="
+echo "===IN THE assets/ FOLDER==="
+MIX_ENV=cypress mix phx.server &
+pid=$! # Store server pid
+echo "===WAITING FOR PHX SERVER==="
+until $(curl --output /dev/null --silent --head --fail http://localhost:4002); do
+    printf '.'
+    sleep 5
+done
+echo ""
+echo "===PHX SERVER RUNNING==="
+echo "===STARTING CYPRESS==="
+./assets/node_modules/.bin/cypress run
+result=$?
+kill -9 $pid # kill server
+echo "===KILLING PHX SERVER==="
+exit $result

As you might have noticed, the MIX_ENV is set to cypress. To create this env, we need the new configuration file config/cypress.exs:

use Mix.Config
+
+# Configure your database
+config :phonix, Phonix.Repo,
+  username: "postgres",
+  password: "postgres",
+  database: "phonix_cypress",
+  hostname: "localhost",
+  pool_size: 10
+
+config :phonix, PhonixWeb.Endpoint,
+  http: [port: 4002],
+  server: true
+
+# Print only warnings and errors during test
+config :logger, level: :warn

This approach is also copied from https://www.alanvardy.com/post/phoenix-cypress-tests. It’s a great idea to separate the test environment from the ui test environment. As Alan suggests, you could use a tool like ex_check which can run your normal tests and your ui tests in parallel, which is only possible if you use different databases and thus different environments.

We are using a different port in the cypress env (4002), so make sure to adjust your tests accordingly.

Now you can run your UI tests by executing ./cypress-run.sh. This script should run on your CI environment as well as locally. Just make sure to run npm install in your CI run!

What else?

I intend to write a second part, figuring out how to use fixtures or reset the database between tests. I saw a few blog posts on how to do this, utilizing sockets in phoenix to take commands. I don’t really like the approach and might come up with a way to work with the database directly. We’ll see!

+
+ +
+ + + + diff --git a/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html b/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html new file mode 100644 index 0000000..a9b8121 --- /dev/null +++ b/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html @@ -0,0 +1,124 @@ + + + + + Quickstart guide for clojure (compojure), clojurescript (figwheel), garden; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Nov 03, 2017 in Dev

+

Quickstart guide for clojure (compojure), clojurescript (figwheel), garden

+

Setting up new projects is always exciting, but if you have done it a few times, it’s getting old quick. I have set up a few projects in the last time and I believe I have a nice setup going which I’m about to share with everybody interested. There is nothing new in here and if you are a seasoned Clojure developer, you might not learn much. If you are just starting out or have some work done in Clojure but need a working setup or some input on your current setup, you are at the right place.

All my projects live inside Vagrant virtual machines. I’ll be using Clojure with Compojure and Ring, Clojurescript with figwheel and garden. I’m also throwing in the cooper library.

Clojure

All my web Clojure projects include compojure and the lein-ring plugin. Both are mature and work very well, so I didn’t look any further. My project.clj at this point looks like:

(defproject mycompany/myproject "0.1.0"
+  :dependencies [[compojure "1.6.0"]
+                 [org.clojure/clojure "1.8.0"]]
+  :main myproject.core
+  :min-lein-version "2.0.0"
+  :plugins [[lein-ring "0.12.1"]]
+  :ring {:auto-reload? true
+         :handler myproject.core/app
+         :open-browser? false
+         :reload-paths ["src/" "resources/"]}
+  :source-paths ["src/clj"])

The ring setup is important as it allows for auto-recompiling when run through lein ring server. open-browser is there because I run the project inside a vm, so there is no browser to open and instead of remembering to run lein ring server-headless every time, I disable it altogether.

ClojureScript

Adding Clojurescript is just a dependency away, especially if you don’t start with figwheel and other libraries but keep it plain and simple:

(defproject mycompany/myproject "0.1.0"
+  :cljsbuild {:builds [{:source-paths ["src/cljs"]
+                        :compiler {:optimizations :whitespace
+                                   :output-to "resources/public/js/main.js"
+                                   :output-dir "resources/public/js"}}]}
+  :dependencies [[compojure "1.6.0"]
+                 [org.clojure/clojure "1.8.0"]
+                 [org.clojure/clojurescript "1.9.946"]
+                 [ring/ring-core "1.6.2"]
+                 [selmer "1.11.2"]]
+  :main myproject.core
+  :min-lein-version "2.0.0"
+  :plugins [[lein-cljsbuild "1.1.7"]
+            [lein-ring "0.12.1"]]
+  :resource-paths ["resources"]
+  :ring {:auto-reload? true
+         :handler myproject.core/app
+         :open-browser? false
+         :reload-paths ["src/" "resources/"]}
+  :source-paths ["src/clj"])

lein-cljsbuild helps transpiling ClojureScript to Javascript by running lein cljsbuild once or lein cljsbuild auto.

Garden

I usually try to stick with the choices popular within the ecosystem. I have used Sass and Less in the past and was fine with both. Garden is another CSS precompiler, but you write your CC as Clojure data structures, making it easy to integrate it within the ecosystem. There is nothing wrong with using another precompiler for CSS or write plain CSS if the project calls for it.

For garden, you only need the dependency [garden "1.3.3"] as well as the plugin [lein-garden "0.3.0"]. After that, adding the garden config to your project.clj works like this:

  :garden {:builds [{:id "screen"
+                     :source-paths ["src/garden"]
+                     :stylesheet myproject.core/main
+                     :compiler {:output-to "resources/public/css/main.css"
+                                :pretty-print? false}}]}

As you can see, the garden source code goes into src/garden. Within src, there is also clj and cljs to split up the different parts (backend, frontend, CSS).

Figwheel

We glanced over figwheel when setting up ClojureScript. If you would stop right now and start working on your project, you’d need to wait for the ClojureScript compiler to generate the js on every change, then reload the website, navigate to where you left off and look at your changes.

With figwheel, only the part of your ClojureScript app that changed get recompiled and these parts get pushed to the browser directly which in turns exchanges the code parts so you see the changes directly.

With all this going on, figwheel was the first hurdle for me. Adding it was straightforward by adding [lein-figwheel "0.5.14"] to the plugin section of the project.clj. After that, the cljsbuild config needed to be changed:

  :cljsbuild {:builds [{:compiler {:asset-path "js/out"
+                                   :main "myproject.core"
+                                   :optimizations :none
+                                   :output-to "resources/public/js/main.js"
+                                   :output-dir "resources/public/js/out"}
+                        :figwheel {:websocket-host "myproject.local"}
+                        :id "dev"
+                        :source-paths ["src/cljs"]}]}

The websocket-host was needed because of the vm. I run the project through a hostname and not by mapping localhost ports. The second thing needed was the figwheel config itself:

  :figwheel {:css-dirs ["resources/public/css"]
+             :hawk-options {:watcher :polling}
+             :ring-handler myproject.core/app
+             :server-port 3000}

css-dirs is important to have hot code reloading for CSS as well. The hawk-options is needed because of the vm (again), as figwheel does not detect code changes (due to the way Vagrant mounts folders). By adding the ring-handler, the ring server is run when running figwheel, making it easier than running both processes in parallel.

Bonus points: Cooper

The next thing for me was to not have the need to run both lein figwheel and lein garden auto in two different shells. Luckily, there is cooper, which can be used to run many tasks in parallel. Add the lein plugin and a small config, and you are good to go:

  :cooper {"figwheel"  ["lein" "figwheel"]
+           "garden" ["lein" "garden" "auto"]}

After that, lein cooper figwheel garden got you covered.

Follow along

If you want to see these changes in full (without cooper right now), go over to https://github.com/Sgoettschkes/web-clj and step through the commits. You can see every code change in detail much better than I would ever be able to outline in this blog.

I’ll also be adding more (and writing a second blog post) on testing Clojure and ClojureScript as well as adding some example code to the repo. Stay tuned!

I’m also looking for feedback towards my setup, both in terms if libraries I used and the ways I plugged them together. You can either comment here, add an issue to the github repo or find me on social media as well as send me an email.

+
+ +
+ + + + diff --git a/p/running-ansible-devel-on-codeship.html b/p/running-ansible-devel-on-codeship.html new file mode 100644 index 0000000..e888a8b --- /dev/null +++ b/p/running-ansible-devel-on-codeship.html @@ -0,0 +1,80 @@ + + + + + Running Ansible devel (on Codeship); Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Mar 25, 2015 in Dev

+

Running Ansible devel (on Codeship)

+

If you are tasked with managing servers, you might have read about Ansible. If you do not know it, here is a quick intro: With Ansible you can define tasks which should be run on your (remote) hosts and Ansible takes those tasks, opens a ssh connection into your hosts and executes them.

The tool is very flexible, making it possible to set up your remote hosts like you would with chef or puppet but can also be used to set up your Raspberry Pi, your Vagrant machines or your localhost!

I won’t go into detail on how my Ansible playbooks look like. There are many tutorials about this out there.

Working with Ansible

I started by installing Ansible through apt-get, because it’s the easiest thing to do if you are running Debian or a Debian-based distro like Ubuntu. The version shipped is pretty old and I was stuck with Ansible 1.7, but then again I didn’t need most of the new features.

I am using Ansible to install software on my localhost and Raspberry Pi as well as my private server. I also maintain 2 small virtual servers for my weekend project, cllctr.

Going bleeding edge

I had Ansible forked and cloned on my system for a long time because I did a tiny contribution to the docs some time ago. I still used the one from apt because I didn’t really care for the new features. At AnsibleFest 2015, I heard the talks about 2.0 and what happens in the Ansible universe and I wanted to be a part of it.

I decided to run Ansible from devel (the master branch of the official repository).

Ansible is written in python, so if you got python installed, you’ll need pip (the python package manager) and install some packages through pip which Ansible depends upon. Afterwards, cloning the Ansible repository and running the hacking/env-setup bash script is enough to get it working. The env-setup will change your PATH and add some environment variables but it’s temporary, so make sure to put it in your .bashrc or somewhere so it’s executed whenever you start a shell!

Continuous Deployment for your infrastructure

Let’s get back to my weekend project for a moment. I try to use best practice and try new things with cllctr even if they don’t really make sense for a weekend project. The code is deployed using Codeship whenever I push a new commit. Depending on the branch, it’s deployed to a staging or production environment.

I wanted to do the same with my Ansible run because it makes sure my code is always runnable and that everybody with write access to the repo can deploy the infrastructure. Defining the environment needed to do an Ansible run and having the steps written down makes it possible for anybody to set up their own environment with a detailed and correct manual.

Codeship makes this really easy. Pip is already there, so installing Ansible with pip is a one-command thing and setting up the ansible-playbook command as the deployment command is easy as well. The only thing is the need for Codeship to be able to ssh into the servers, but with the Codeship ssh key for your projects, that’s easily done as well!

Ansible devel at Codeship

For a while, my local Ansible was always up-to-date and Codeship ran the 1.8.4 version available through pip at that time. It was fine as I didn’t use any features not available in 1.8.4 and didn’t hit any bugs.

But a few weeks ago, I had a specific use case and because I could not solve it myself, I asked the mailing list. An hour later Brian Coca opened a pull request to add the functionality I needed. I was stunned. The only problem: I was not able to use it as it was not merged into the devel branch yet.

I decided to use my own fork of Ansible to run locally. This meant that I could apply the PR from Brian on my fork and use it even though the upstream repository hadn’t merged it yet. It worked like a charm and I was able to test the PR and confirm it works as expected.

Now the only problem was using this specific version of Ansible at Codeship. This - again - was easier than expected. Cloning my repo and running it turned out to be a 5 minute change. One upside to this is that because I use my own fork of Ansible, I have full control over changes done to the devel branch. This means I can pull in upstream changes from time to time, test them locally and push them to my fork.

If you use the Ansible repo directly on a service like Codeship, it might pull in a different version on every run, leaving the possibility of unexpected breaks. If you don’t want to maintain your own fork, you can of course checkout a specific commit SHA or tag and “pin” your Ansible version that way!

Learnings

After getting Ansible running from devel I felt stupid for not doing it earlier. If you like using bleeding edge software, give it a try! Using the same version everywhere is almost a must if you want to avoid failing ansible runs. If you got Co-Workers running ansible as well, bleeding edge might not be the right way.

But if you got the freedom to experiment, do it. I didn’t regret it one bit!

+
+ +
+ + + + diff --git a/p/vagrant-and-tmpfs.html b/p/vagrant-and-tmpfs.html new file mode 100644 index 0000000..018a13e --- /dev/null +++ b/p/vagrant-and-tmpfs.html @@ -0,0 +1,80 @@ + + + + + Vagrant and tmpfs; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Oct 28, 2014 in Dev

+

Vagrant and tmpfs

+

When I am talking about Vagrant at usergroups and conferences, one thing I always mention is that shared folders are slow, especially with Virtualbox. While it’s true for reading to some extent, the biggest bottleneck I experience is writing to the shared folder. This happens a lot with cache and logs in development mode when you use e.g. symfony2. These folders live inside the project dir and depending on your project structure you might not be able to redirect them to some folder inside the vm.

Mounting inside share folders

At the vagrant workshop for ViennaPHP, somebody mentioned to me the possibility to mount shared folders inside the virtual machine as a tmpfs drive. This surprised me because I was under the impression that you cannot mount folders inside a shared folder. Turns out you can, which opens a lot of possibilities.

One is mounting the cache and log dir as a tmpfs drive, bypassing the shared folder and keeping the cache files and logs in RAM. As writing to disk is slower than using the RAM, this should make things faster. Compared to shared folders, it should be a huge improvement. The disadvantage of this is that all files will be gone after the machine is powered off and that you can’t access them easily from your host.

“Benchmarking”

In order to test this, I created a small python script which creates 10.000 files with one line of text and deletes them afterwards. I let it run with three targets: a folder inside the vm, a synced folder and a tmpfs folder.

To my surprise, it’s faster to write to a folder inside a vm than to tmpfs. This might be because of the virtualization of the RAM which adds overhead. But, as I expected, it’s much faster (about 10 times as fast) to use tmpfs than writing to a shared folder. The exact numbers for me were:

Other ways to improve vagrant performance

Of course there are other ways to speed up your vagrant setup. You can try various things:

And then there is docker, which might take away most of the overhead but adds complexity and doesn’t really isolate development environments. If I find the time to research a bit more, I’ll put a post on docker up as well!

+
+ +
+ + + + diff --git a/p/vagrant-base-boxes.html b/p/vagrant-base-boxes.html new file mode 100644 index 0000000..e03fabd --- /dev/null +++ b/p/vagrant-base-boxes.html @@ -0,0 +1,80 @@ + + + + + Vagrant base boxes; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Jan 27, 2015 in Dev

+

Vagrant base boxes

+

If you are using vagrant on a daily basis, you might already be using something else than the base box suggested by vagrant (which would be the hashicorp/precise32 or hashicorp/precise64). If you are thinking about creating your own base boxes or are interested in the topic, read on. If you have no idea what I am talking about, the vagrant documentation can tell you more about Boxes.

I got to the point where I wanted to use Debian instead of Ubuntu and, looking through Vagrant Cloud (now Atlas), no box really appealed to me. So I decided to build my own base box on top of the chef/debian-7.4. I am using vagrant to build this base box, meaning I have a Vagrantfile which imports the chef base box, then some shell provisioning to install the absolute minimum I need for every box (packages like vim for example).

This is pretty handy: If chef updates their base box I can update my base box just by destroying it, building it new, packaging it and uploading it to Atlas. If I decide to add some new software, the same workflow applies. Provision it again, package it, upload it.

It also means I don’t have to work with packer, which seems like an amazing tool but is just not what I want to spend my time on right now.

Evolution of a base box

I started my base box pretty simply with one Vagrantfile and some shell provisioners inside. Afterwards, I’d manually package the box and upload it. The result was my first base box, Sgoettschkes/debian7. I didn’t really think about other users and updated it from time to time, installing upgrades for all installed packages and maybe adding something I wanted to be present in the base box.

When switching from chef to Ansible for provisioning, using local Ansible provisioning rather than the Ansible provisioner from vagrant which needs Ansible present on the host, I also installed Ansible in the base box. My original idea was to remove chef because I don’t use it anymore. Back then I realized there are a few people out there using my base box (it has ~400 downloads to date), which means removing the provisioner people rely upon is not the best idea.

I decided to split the boxes up, providing Sgoettschkes/debian7-ansible which is, as you might have figured, the chef base box + my software selection and Ansible, as well as Sgoettschkes/debian7-chef, which is the same but with chef instead of Ansible. The Sgoettschkes/debian7 box is deprecated now and won’t receive any updates.

Knowing what’s inside

To find out what exactly is going on inside my base boxes, have a look at the git repository at GitHub, Sgoettschkes/va. It contains all three boxes. Looking into debian7-ansible, you’ll see a very basic Vagrantfile and the provision.sh which contains all commands which are executed inside the vm before it’s packaged and shipped.

It’s not perfect and executing shell commands might not be the nicest way, but I really like that it just works. Again, I could try getting a real base box working with packer and maybe I will in the future. But for now, this works exactly like I need it. No need to make the setup more complicated!

If you are using my debian7 box, consider switching to either debian7-ansible or debian7-chef. It should not break anything as the software stack is nearly the same (although not installing chef means ruby is not up-to-date on the ansible base box). If you are not using any of my base boxes yet, you might want to reconsider. As you see, I really take care of maintaining stable base boxes and as they are lightweight, you can use them as a foundation to build your stuff on top.

+
+ +
+ + + + diff --git a/p/what-works-for-me.html b/p/what-works-for-me.html new file mode 100644 index 0000000..0849f6f --- /dev/null +++ b/p/what-works-for-me.html @@ -0,0 +1,80 @@ + + + + + What works for me; Sgoettschkes.me; + + + + + + + + + + + + + + +
+
+
+

Sgoettschkes.me

+

Coding 5 to 9

+
+
+
+ + + +
+ +
+

Written by Sebastian on Sep 20, 2015 in Productivity

+

What works for me

+

I struggle like most people with getting things done. I have read books and a lot of blog posts. Most didn’t work. I have implemented some of the things David Allen suggests in “Getting things done”. I am keeping my inbox at zero most days with the tips from Andreas Klinger. But that’s about it. Nothing else ever worked - 5/3/1 tasks for the day, Pomodoro, keeping hand-written task lists, using various apps.

Thinking about “why” rather than “what”

One of these days I sat down and thought about why I wanted to be more productive. What I wanted to achieve. I didn’t start with yet another methodology which might or might not work. This is something I am missing from almost every blog post on productivity. People tell other people what to do, but seriously, if you don’t have any reasons for being productive, why would you apply those?

Another thing I am missing from those tips is a reminder that people are different and most tips won’t work for you. So, my next tips might not work for you. Try them anyway ;)

Three steps to productivity

The first thing I do (and actually did for quite some time) is writing everything down. I have a “Next Actions” Trello board which keeps everything I need to do. Small items (clean up desk) and big projects (my upcoming blog post series on “Developer Workflows by Example”) are both in there. I also keep a list of all my current projects. Both help me to get things out of my head and not forget about them. From the day I started it, the times I had to say “Oh, I did forget about this” went down by a lot, and the times I said “Yeah, I know and will do it as soon as I have the time” went up.

The second thing I started is writing a journal every night. It’s rather short and takes about 5 minutes. It’s hand-written and I split everyday in “Body” and “Mind”. I write down how I felt that day, both physically and mentally. It helps me realize what kind of day it was and where I am at. Feelings that go unnoticed come out at night and I can process them. It’s a great tool for me.

The third thing I picked up is writing down the most important 3 tasks for the next day at night. I make sure to balance things and keep the tasks small, making them possible to achieve even if the day goes differently than planned. Today is Sunday and my list had “Write three emails”, “Cleanup living room” and “Write blogpost” on it.

Failing

The hardest thing for me is having such a rhythm and then missing a beat. Usually, these things happen and I start losing focus. Not this time. As I have the “why” this time, missing a journal entry or not completing an item on my todo list is not that bad. The “why” is still there and didn’t change, so I start again.

And so, I just keep going. Every day, just like that. All three things don’t take up a lot of time and can be done with a minimum of effort. The three todo items keep me focused. Of course I’ll achieve a lot more on any given day, but those three are the major ones for the day.

And to keep my motivation up, every once in a while I’ll slip a fun but not that important task into the list as well ;)

+
+ +
+ + + + diff --git a/robots.txt b/robots.txt new file mode 100644 index 0000000..a0af121 --- /dev/null +++ b/robots.txt @@ -0,0 +1,5 @@ +User-agent: * + +Disallow: + +Sitemap: https://sgoettschkes.me/sitemap.xml diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..b174503 --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,124 @@ + + + + + https://sgoettschkes.me/p/from-phrozn-to-jekyll.html + + + + https://sgoettschkes.me/p/deploying-a-jekyll-website-to-github-pages-using-travisci.html + + + + https://sgoettschkes.me/p/google-app-engine-remote-api-within-ipython-notebooks.html + + + + https://sgoettschkes.me/p/phoenix-testing-with-cypress.html + + + + https://sgoettschkes.me/p/idempotent-version-updates-with-ansible.html + + + + https://sgoettschkes.me/p/developing-cllctr.html + + + + https://sgoettschkes.me/p/backups-with-obnam.html + + + + https://sgoettschkes.me/p/phantomjs-2-on-wheezy-and-travisci.html + + + + https://sgoettschkes.me/p/jekyll-update-your-sitemap-automatically-with-rake.html + + + + https://sgoettschkes.me/p/deploying-to-pythonanywhere-with-travisci.html + + + + https://sgoettschkes.me/p/vagrant-base-boxes.html + + + + https://sgoettschkes.me/p/join-us-at-viennaphp.html + + + + https://sgoettschkes.me/p/clojure-0-the-setup.html + + + + https://sgoettschkes.me/p/vagrant-and-tmpfs.html + + + + https://sgoettschkes.me/p/my-personal-state-of-clojure.html + + + + https://sgoettschkes.me/p/running-ansible-devel-on-codeship.html + + + + https://sgoettschkes.me/p/building-onesen-in-public.html + + + + https://sgoettschkes.me/p/just-ask.html + + + + https://sgoettschkes.me/p/hello-world.html + + + + https://sgoettschkes.me/p/clojure-2-getting-used-to.html + + + + https://sgoettschkes.me/p/quickstart-guide-for-clojure-compojure-clojurescript-figwheel-garden.html + + + + https://sgoettschkes.me/p/an-experiment.html + + + + https://sgoettschkes.me/p/dart-vagrant-and-ides.html + + + + https://sgoettschkes.me/p/clojure-1-the-beginning.html + + + 
+ https://sgoettschkes.me/p/my-first-few-days-with-jake.html + + + + https://sgoettschkes.me/p/what-works-for-me.html + + + + https://sgoettschkes.me/about.html + + + + https://sgoettschkes.me/now.html + + + + https://sgoettschkes.me/hashtags.html + + + + https://sgoettschkes.me/index.html + + +