diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 000000000..e69de29bb
diff --git a/404.html b/404.html
new file mode 100644
index 000000000..c5ab3c810
--- /dev/null
+++ b/404.html
@@ -0,0 +1,3063 @@
[404.html: generated Material for MkDocs error page, 3063 lines. The markup was lost in extraction; the surviving text content is the page title "UPPMAX Documentation" and the body text "404 - Not found".]
\ No newline at end of file
diff --git a/assets/UU_logo_color.svg b/assets/UU_logo_color.svg
new file mode 100644
index 000000000..2c9c1b6c0
--- /dev/null
+++ b/assets/UU_logo_color.svg
@@ -0,0 +1,347 @@
[UU_logo_color.svg: 347-line SVG logo. The markup was lost in extraction; no text content survives.]
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 000000000..1cf13b9f9
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/javascripts/bundle.88dd0f4e.min.js b/assets/javascripts/bundle.88dd0f4e.min.js
new file mode 100644
index 000000000..fb8f31090
--- /dev/null
+++ b/assets/javascripts/bundle.88dd0f4e.min.js
@@ -0,0 +1,16 @@
[bundle.88dd0f4e.min.js: 16 lines of minified JavaScript, the Material for MkDocs theme application bundle. Per its embedded license headers it vendors the focus-visible polyfill, escape-html (MIT) and clipboard.js v2.0.11 (MIT, Zeno Rocha), alongside RxJS-based runtime code for the theme; the bundle is truncated in this excerpt.]
0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(w(()=>s.innerHTML=""),v(({items:l})=>O(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Be(4),Vr(n),v(([f])=>f)))),m(Mn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(ne(l=>{let f=fe("details",l);return typeof f=="undefined"?S:h(f,"toggle").pipe(W(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(w(l=>o.next(l)),_(()=>o.complete()),m(l=>$({ref:e},l)))}function ms(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=ye();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function mi(e,t){let r=new g,o=r.pipe(Z(),ie(!0));return r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),h(e,"click").pipe(W(o)).subscribe(n=>n.preventDefault()),ms(e,t).pipe(w(n=>r.next(n)),_(()=>r.complete()),m(n=>$({ref:e},n)))}function fi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=O(h(n,"keydown"),h(n,"focus")).pipe(ve(se),m(()=>n.value),K());return o.pipe(He(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(w(s=>o.next(s)),_(()=>o.complete()),m(()=>({ref:e})))}function ui(e,{index$:t,keyboard$:r}){let o=xe();try{let n=ai(o.search,t),i=Se("search-query",e),a=Se("search-result",e);h(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Ie();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of P(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,d])=>d-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...P(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Ie()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=pi(i,{worker$:n});return O(s,li(a,{worker$:n,query$:s})).pipe(Re(...ae("search-share",e).map(p=>mi(p,{query$:s})),...ae("search-suggest",e).map(p=>fi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ye}}function di(e,{index$:t,location$:r}){return z([t,r.pipe(Q(ye()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>ii(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function fs(e,{viewport$:t,main$:r}){let 
o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Zr(e,o){var n=o,{header$:t}=n,r=so(n,["header$"]);let i=R(".md-sidebar__scrollwrap",e),{y:a}=Ve(i);return C(()=>{let s=new g,p=s.pipe(Z(),ie(!0)),c=s.pipe(Me(0,me));return c.pipe(re(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of P(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2})}}}),ue(P("label[tabindex]",e)).pipe(ne(l=>h(l,"click").pipe(ve(se),m(()=>l),W(p)))).subscribe(l=>{let f=R(`[id="${l.htmlFor}"]`);R(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),fs(e,r).pipe(w(l=>s.next(l)),_(()=>s.complete()),m(l=>$({ref:e},l)))})}function hi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return st(je(`${r}/releases/latest`).pipe(de(()=>S),m(o=>({version:o.tag_name})),De({})),je(r).pipe(de(()=>S),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return je(r).pipe(m(o=>({repositories:o.public_repos})),De({}))}}function bi(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return st(je(`${r}/releases/permalink/latest`).pipe(de(()=>S),m(({tag_name:o})=>({version:o})),De({})),je(r).pipe(de(()=>S),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),De({}))).pipe(m(([o,n])=>$($({},o),n)))}function vi(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return hi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return bi(r,o)}return S}var us;function ds(e){return us||(us=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return S}return vi(e.href).pipe(w(o=>__md_set("__source",o,sessionStorage)))}).pipe(de(()=>S),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function gi(e){let t=R(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(_n(o)),t.classList.add("md-source__repository--active")}),ds(e).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function hs(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),ee("hidden"))}function yi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):hs(e,t)).pipe(w(o=>r.next(o)),_(()=>r.complete()),m(o=>$({ref:e},o)))})}function bs(e,{viewport$:t,header$:r}){let o=new Map,n=P(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(ee("height"),m(({height:s})=>{let p=Se("main"),c=R(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(ee("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let 
d=f.offsetParent;for(;d;d=d.offsetParent)u+=d.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),He(i),v(([p,c])=>t.pipe(Fr(([l,f],{offset:{y:u},size:d})=>{let y=u+d.height>=Math.floor(s.height);for(;f.length;){let[,L]=f[0];if(L-c=u&&!y)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Be(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(Z(),ie(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=O(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),He(o.pipe(ve(se))),re(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:d}=ce(f);f.scrollTo({top:u-d/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(W(a),ee("offset"),_e(250),Ce(1),W(n.pipe(Ce(1))),ct({delay:250}),re(i)).subscribe(([,{prev:s}])=>{let p=ye(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),bs(e,{viewport$:t,header$:r}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))})}function vs(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Be(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),W(o.pipe(Ce(1))),ie(!0),ct({delay:250}),m(a=>({hidden:a})))}function Ei(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(Z(),ie(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(W(a),ee("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),h(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),vs(e,{viewport$:t,main$:o,target$:n}).pipe(w(s=>i.next(s)),_(()=>i.complete()),m(s=>$({ref:e},s)))}function wi({document$:e,viewport$:t}){e.pipe(v(()=>P(".md-ellipsis")),ne(r=>tt(r).pipe(W(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?mt(n,{viewport$:t}).pipe(W(e.pipe(Ce(1))),_(()=>n.removeAttribute("title"))):S})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>P(".md-status")),ne(r=>mt(r,{viewport$:t}))).subscribe()}function Ti({document$:e,tablet$:t}){e.pipe(v(()=>P(".md-toggle--indeterminate")),w(r=>{r.indeterminate=!0,r.checked=!1}),ne(r=>h(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),re(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function gs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Si({document$:e}){e.pipe(v(()=>P("[data-md-scrollfix]")),w(t=>t.removeAttribute("data-md-scrollfix")),b(gs),ne(t=>h(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function 
Oi({viewport$:e,tablet$:t}){z([ze("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),re(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function ys(){return location.protocol==="file:"?Tt(`${new URL("search/search_index.js",eo.base)}`).pipe(m(()=>__index),G(1)):je(new URL("search/search_index.json",eo.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Go(),Ut=sn(),Lt=ln(Ut),to=an(),Oe=gn(),hr=Pt("(min-width: 960px)"),Mi=Pt("(min-width: 1220px)"),_i=mn(),eo=xe(),Ai=document.forms.namedItem("search")?ys():Ye,ro=new g;Zn({alert$:ro});var oo=new g;B("navigation.instant")&&oi({location$:Ut,viewport$:Oe,progress$:oo}).subscribe(ot);var Li;((Li=eo.version)==null?void 0:Li.provider)==="mike"&&ci({document$:ot});O(Ut,Lt).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});to.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&<(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&<(r);break;case"Enter":let o=Ie();o instanceof HTMLLabelElement&&o.click()}});wi({viewport$:Oe,document$:ot});Ti({document$:ot,tablet$:hr});Si({document$:ot});Oi({viewport$:Oe,tablet$:hr});var 
rt=Kn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Gn(e,{viewport$:Oe,header$:rt})),G(1)),xs=O(...ae("consent").map(e=>En(e,{target$:Lt})),...ae("dialog").map(e=>qn(e,{alert$:ro})),...ae("palette").map(e=>Jn(e)),...ae("progress").map(e=>Xn(e,{progress$:oo})),...ae("search").map(e=>ui(e,{index$:Ai,keyboard$:to})),...ae("source").map(e=>gi(e))),Es=C(()=>O(...ae("announce").map(e=>xn(e)),...ae("content").map(e=>zn(e,{viewport$:Oe,target$:Lt,print$:_i})),...ae("content").map(e=>B("search.highlight")?di(e,{index$:Ai,location$:Ut}):S),...ae("header").map(e=>Yn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("header-title").map(e=>Bn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Mi,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Zr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>yi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>xi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})),...ae("top").map(e=>Ei(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Lt})))),Ci=ot.pipe(v(()=>Es),Re(xs),G(1));Ci.subscribe();window.document$=ot;window.location$=Ut;window.target$=Lt;window.keyboard$=to;window.viewport$=Oe;window.tablet$=hr;window.screen$=Mi;window.print$=_i;window.alert$=ro;window.progress$=oo;window.component$=Ci;})(); +//# sourceMappingURL=bundle.88dd0f4e.min.js.map + diff --git a/assets/javascripts/bundle.88dd0f4e.min.js.map b/assets/javascripts/bundle.88dd0f4e.min.js.map new file mode 100644 index 000000000..dab2a8754 --- /dev/null +++ b/assets/javascripts/bundle.88dd0f4e.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/escape-html/index.js", "node_modules/clipboard/dist/clipboard.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/tslib/tslib.es6.mjs", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", "node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", 
"node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", "node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", 
"node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", "src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", 
"src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/findurl/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", "src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": 
["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. 
Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n 
document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * escape-html\n * Copyright(c) 2012-2013 TJ Holowaychuk\n * Copyright(c) 2015 Andreas Lubbe\n * Copyright(c) 2015 Tiancheng \"Timothy\" Gu\n * MIT Licensed\n */\n\n'use strict';\n\n/**\n * Module variables.\n * @private\n */\n\nvar matchHtmlRegExp = /[\"'&<>]/;\n\n/**\n * Module exports.\n * @public\n */\n\nmodule.exports = escapeHtml;\n\n/**\n * Escape special characters in the given string of html.\n *\n * @param {string} string The string to escape for inserting into HTML\n * @return {string}\n * @public\n */\n\nfunction escapeHtml(string) {\n var str = '' + string;\n var match = matchHtmlRegExp.exec(str);\n\n if (!match) {\n return str;\n }\n\n var escape;\n var html = '';\n var index = 0;\n var lastIndex = 0;\n\n for (index = match.index; index < str.length; index++) {\n switch (str.charCodeAt(index)) {\n case 34: // \"\n escape = '"';\n break;\n case 38: // &\n escape = '&';\n break;\n case 39: // '\n escape = ''';\n break;\n case 60: // <\n escape = '<';\n break;\n case 62: // >\n escape = '>';\n break;\n default:\n continue;\n }\n\n if (lastIndex !== index) {\n html += str.substring(lastIndex, index);\n }\n\n lastIndex = index + 1;\n html += escape;\n }\n\n return lastIndex !== index\n ? 
html + str.substring(lastIndex, index)\n : html;\n}\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
You can\\'t cut text from elements with \"readonly\" or \"disabled\" attributes');\n }\n } else {\n throw new Error('Invalid \"target\" value, use a valid Element');\n }\n } // Define selection strategy based on `text` property.\n\n\n if (text) {\n return actions_copy(text, {\n container: container\n });\n } // Defines which selection strategy based on `target` property.\n\n\n if (target) {\n return action === 'cut' ? actions_cut(target) : actions_copy(target, {\n container: container\n });\n }\n};\n\n/* harmony default export */ var actions_default = (ClipboardActionDefault);\n;// CONCATENATED MODULE: ./src/clipboard.js\nfunction clipboard_typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { clipboard_typeof = function _typeof(obj) { return typeof obj; }; } else { clipboard_typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return clipboard_typeof(obj); }\n\nfunction _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError(\"Cannot call a class as a function\"); } }\n\nfunction _defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if (\"value\" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } }\n\nfunction _createClass(Constructor, protoProps, staticProps) { if (protoProps) _defineProperties(Constructor.prototype, protoProps); if (staticProps) _defineProperties(Constructor, staticProps); return Constructor; }\n\nfunction _inherits(subClass, superClass) { if (typeof superClass !== \"function\" && superClass !== null) { throw new TypeError(\"Super expression must either be null or a function\"); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, writable: true, configurable: true } }); if (superClass) _setPrototypeOf(subClass, superClass); }\n\nfunction _setPrototypeOf(o, p) { _setPrototypeOf = Object.setPrototypeOf || function _setPrototypeOf(o, p) { o.__proto__ = p; return o; }; return _setPrototypeOf(o, p); }\n\nfunction _createSuper(Derived) { var hasNativeReflectConstruct = _isNativeReflectConstruct(); return function _createSuperInternal() { var Super = _getPrototypeOf(Derived), result; if (hasNativeReflectConstruct) { var NewTarget = _getPrototypeOf(this).constructor; result = Reflect.construct(Super, arguments, NewTarget); } else { result = Super.apply(this, arguments); } return _possibleConstructorReturn(this, result); }; }\n\nfunction _possibleConstructorReturn(self, call) { if (call && (clipboard_typeof(call) === \"object\" || typeof call === \"function\")) { return call; } return _assertThisInitialized(self); }\n\nfunction _assertThisInitialized(self) { if (self === void 0) { throw new ReferenceError(\"this hasn't been initialised - super() hasn't been called\"); } return self; }\n\nfunction _isNativeReflectConstruct() { if (typeof Reflect === \"undefined\" || !Reflect.construct) return false; if (Reflect.construct.sham) return false; if (typeof Proxy === \"function\") return true; try { Date.prototype.toString.call(Reflect.construct(Date, [], function () {})); return true; } catch (e) { return false; } }\n\nfunction _getPrototypeOf(o) { _getPrototypeOf = Object.setPrototypeOf ? 
Object.getPrototypeOf : function _getPrototypeOf(o) { return o.__proto__ || Object.getPrototypeOf(o); }; return _getPrototypeOf(o); }\n\n\n\n\n\n\n/**\n * Helper function to retrieve attribute value.\n * @param {String} suffix\n * @param {Element} element\n */\n\nfunction getAttributeValue(suffix, element) {\n var attribute = \"data-clipboard-\".concat(suffix);\n\n if (!element.hasAttribute(attribute)) {\n return;\n }\n\n return element.getAttribute(attribute);\n}\n/**\n * Base class which takes one or more elements, adds event listeners to them,\n * and instantiates a new `ClipboardAction` on each click.\n */\n\n\nvar Clipboard = /*#__PURE__*/function (_Emitter) {\n _inherits(Clipboard, _Emitter);\n\n var _super = _createSuper(Clipboard);\n\n /**\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n * @param {Object} options\n */\n function Clipboard(trigger, options) {\n var _this;\n\n _classCallCheck(this, Clipboard);\n\n _this = _super.call(this);\n\n _this.resolveOptions(options);\n\n _this.listenClick(trigger);\n\n return _this;\n }\n /**\n * Defines if attributes would be resolved using internal setter functions\n * or custom functions that were passed in the constructor.\n * @param {Object} options\n */\n\n\n _createClass(Clipboard, [{\n key: \"resolveOptions\",\n value: function resolveOptions() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n this.action = typeof options.action === 'function' ? options.action : this.defaultAction;\n this.target = typeof options.target === 'function' ? options.target : this.defaultTarget;\n this.text = typeof options.text === 'function' ? options.text : this.defaultText;\n this.container = clipboard_typeof(options.container) === 'object' ? options.container : document.body;\n }\n /**\n * Adds a click event listener to the passed trigger.\n * @param {String|HTMLElement|HTMLCollection|NodeList} trigger\n */\n\n }, {\n key: \"listenClick\",\n value: function listenClick(trigger) {\n var _this2 = this;\n\n this.listener = listen_default()(trigger, 'click', function (e) {\n return _this2.onClick(e);\n });\n }\n /**\n * Defines a new `ClipboardAction` on each click event.\n * @param {Event} e\n */\n\n }, {\n key: \"onClick\",\n value: function onClick(e) {\n var trigger = e.delegateTarget || e.currentTarget;\n var action = this.action(trigger) || 'copy';\n var text = actions_default({\n action: action,\n container: this.container,\n target: this.target(trigger),\n text: this.text(trigger)\n }); // Fires an event based on the copy operation result.\n\n this.emit(text ? 
'success' : 'error', {\n action: action,\n text: text,\n trigger: trigger,\n clearSelection: function clearSelection() {\n if (trigger) {\n trigger.focus();\n }\n\n window.getSelection().removeAllRanges();\n }\n });\n }\n /**\n * Default `action` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultAction\",\n value: function defaultAction(trigger) {\n return getAttributeValue('action', trigger);\n }\n /**\n * Default `target` lookup function.\n * @param {Element} trigger\n */\n\n }, {\n key: \"defaultTarget\",\n value: function defaultTarget(trigger) {\n var selector = getAttributeValue('target', trigger);\n\n if (selector) {\n return document.querySelector(selector);\n }\n }\n /**\n * Allow fire programmatically a copy action\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @returns Text copied.\n */\n\n }, {\n key: \"defaultText\",\n\n /**\n * Default `text` lookup function.\n * @param {Element} trigger\n */\n value: function defaultText(trigger) {\n return getAttributeValue('text', trigger);\n }\n /**\n * Destroy lifecycle.\n */\n\n }, {\n key: \"destroy\",\n value: function destroy() {\n this.listener.destroy();\n }\n }], [{\n key: \"copy\",\n value: function copy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n return actions_copy(target, options);\n }\n /**\n * Allow fire programmatically a cut action\n * @param {String|HTMLElement} target\n * @returns Text cutted.\n */\n\n }, {\n key: \"cut\",\n value: function cut(target) {\n return actions_cut(target);\n }\n /**\n * Returns the support of the given action, or all actions if no action is\n * given.\n * @param {String} [action]\n */\n\n }, {\n key: \"isSupported\",\n value: function isSupported() {\n var action = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : ['copy', 'cut'];\n var actions = typeof action === 'string' ? 
[action] : action;\n var support = !!document.queryCommandSupported;\n actions.forEach(function (action) {\n support = support && !!document.queryCommandSupported(action);\n });\n return support;\n }\n }]);\n\n return Clipboard;\n}((tiny_emitter_default()));\n\n/* harmony default export */ var clipboard = (Clipboard);\n\n/***/ }),\n\n/***/ 828:\n/***/ (function(module) {\n\nvar DOCUMENT_NODE_TYPE = 9;\n\n/**\n * A polyfill for Element.matches()\n */\nif (typeof Element !== 'undefined' && !Element.prototype.matches) {\n var proto = Element.prototype;\n\n proto.matches = proto.matchesSelector ||\n proto.mozMatchesSelector ||\n proto.msMatchesSelector ||\n proto.oMatchesSelector ||\n proto.webkitMatchesSelector;\n}\n\n/**\n * Finds the closest parent that matches a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @return {Function}\n */\nfunction closest (element, selector) {\n while (element && element.nodeType !== DOCUMENT_NODE_TYPE) {\n if (typeof element.matches === 'function' &&\n element.matches(selector)) {\n return element;\n }\n element = element.parentNode;\n }\n}\n\nmodule.exports = closest;\n\n\n/***/ }),\n\n/***/ 438:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar closest = __webpack_require__(828);\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction _delegate(element, selector, type, callback, useCapture) {\n var listenerFn = listener.apply(this, arguments);\n\n element.addEventListener(type, listenerFn, useCapture);\n\n return {\n destroy: function() {\n element.removeEventListener(type, listenerFn, useCapture);\n }\n }\n}\n\n/**\n * Delegates event to a selector.\n *\n * @param {Element|String|Array} [elements]\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @param {Boolean} useCapture\n * @return {Object}\n */\nfunction delegate(elements, selector, type, callback, useCapture) {\n // Handle the regular Element usage\n if (typeof elements.addEventListener === 'function') {\n return _delegate.apply(null, arguments);\n }\n\n // Handle Element-less usage, it defaults to global delegation\n if (typeof type === 'function') {\n // Use `document` as the first parameter, then apply arguments\n // This is a short way to .unshift `arguments` without running into deoptimizations\n return _delegate.bind(null, document).apply(null, arguments);\n }\n\n // Handle Selector-based usage\n if (typeof elements === 'string') {\n elements = document.querySelectorAll(elements);\n }\n\n // Handle Array-like based usage\n return Array.prototype.map.call(elements, function (element) {\n return _delegate(element, selector, type, callback, useCapture);\n });\n}\n\n/**\n * Finds closest match and invokes callback.\n *\n * @param {Element} element\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Function}\n */\nfunction listener(element, selector, type, callback) {\n return function(e) {\n e.delegateTarget = closest(e.target, selector);\n\n if (e.delegateTarget) {\n callback.call(element, e);\n }\n }\n}\n\nmodule.exports = delegate;\n\n\n/***/ }),\n\n/***/ 879:\n/***/ (function(__unused_webpack_module, exports) {\n\n/**\n * Check if argument is a HTML element.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.node = function(value) {\n return value !== undefined\n && 
value instanceof HTMLElement\n && value.nodeType === 1;\n};\n\n/**\n * Check if argument is a list of HTML elements.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.nodeList = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return value !== undefined\n && (type === '[object NodeList]' || type === '[object HTMLCollection]')\n && ('length' in value)\n && (value.length === 0 || exports.node(value[0]));\n};\n\n/**\n * Check if argument is a string.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.string = function(value) {\n return typeof value === 'string'\n || value instanceof String;\n};\n\n/**\n * Check if argument is a function.\n *\n * @param {Object} value\n * @return {Boolean}\n */\nexports.fn = function(value) {\n var type = Object.prototype.toString.call(value);\n\n return type === '[object Function]';\n};\n\n\n/***/ }),\n\n/***/ 370:\n/***/ (function(module, __unused_webpack_exports, __webpack_require__) {\n\nvar is = __webpack_require__(879);\nvar delegate = __webpack_require__(438);\n\n/**\n * Validates all params and calls the right\n * listener function based on its target type.\n *\n * @param {String|HTMLElement|HTMLCollection|NodeList} target\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listen(target, type, callback) {\n if (!target && !type && !callback) {\n throw new Error('Missing required arguments');\n }\n\n if (!is.string(type)) {\n throw new TypeError('Second argument must be a String');\n }\n\n if (!is.fn(callback)) {\n throw new TypeError('Third argument must be a Function');\n }\n\n if (is.node(target)) {\n return listenNode(target, type, callback);\n }\n else if (is.nodeList(target)) {\n return listenNodeList(target, type, callback);\n }\n else if (is.string(target)) {\n return listenSelector(target, type, callback);\n }\n else {\n throw new TypeError('First argument must be a String, HTMLElement, HTMLCollection, or NodeList');\n }\n}\n\n/**\n * Adds an event listener to a HTML element\n * and returns a remove listener function.\n *\n * @param {HTMLElement} node\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNode(node, type, callback) {\n node.addEventListener(type, callback);\n\n return {\n destroy: function() {\n node.removeEventListener(type, callback);\n }\n }\n}\n\n/**\n * Add an event listener to a list of HTML elements\n * and returns a remove listener function.\n *\n * @param {NodeList|HTMLCollection} nodeList\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenNodeList(nodeList, type, callback) {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.addEventListener(type, callback);\n });\n\n return {\n destroy: function() {\n Array.prototype.forEach.call(nodeList, function(node) {\n node.removeEventListener(type, callback);\n });\n }\n }\n}\n\n/**\n * Add an event listener to a selector\n * and returns a remove listener function.\n *\n * @param {String} selector\n * @param {String} type\n * @param {Function} callback\n * @return {Object}\n */\nfunction listenSelector(selector, type, callback) {\n return delegate(document.body, selector, type, callback);\n}\n\nmodule.exports = listen;\n\n\n/***/ }),\n\n/***/ 817:\n/***/ (function(module) {\n\nfunction select(element) {\n var selectedText;\n\n if (element.nodeName === 'SELECT') {\n element.focus();\n\n selectedText = element.value;\n }\n else if (element.nodeName === 'INPUT' || element.nodeName 
=== 'TEXTAREA') {\n var isReadOnly = element.hasAttribute('readonly');\n\n if (!isReadOnly) {\n element.setAttribute('readonly', '');\n }\n\n element.select();\n element.setSelectionRange(0, element.value.length);\n\n if (!isReadOnly) {\n element.removeAttribute('readonly');\n }\n\n selectedText = element.value;\n }\n else {\n if (element.hasAttribute('contenteditable')) {\n element.focus();\n }\n\n var selection = window.getSelection();\n var range = document.createRange();\n\n range.selectNodeContents(element);\n selection.removeAllRanges();\n selection.addRange(range);\n\n selectedText = selection.toString();\n }\n\n return selectedText;\n}\n\nmodule.exports = select;\n\n\n/***/ }),\n\n/***/ 279:\n/***/ (function(module) {\n\nfunction E () {\n // Keep this empty so it's easier to inherit from\n // (via https://github.com/lipsmack from https://github.com/scottcorgan/tiny-emitter/issues/3)\n}\n\nE.prototype = {\n on: function (name, callback, ctx) {\n var e = this.e || (this.e = {});\n\n (e[name] || (e[name] = [])).push({\n fn: callback,\n ctx: ctx\n });\n\n return this;\n },\n\n once: function (name, callback, ctx) {\n var self = this;\n function listener () {\n self.off(name, listener);\n callback.apply(ctx, arguments);\n };\n\n listener._ = callback\n return this.on(name, listener, ctx);\n },\n\n emit: function (name) {\n var data = [].slice.call(arguments, 1);\n var evtArr = ((this.e || (this.e = {}))[name] || []).slice();\n var i = 0;\n var len = evtArr.length;\n\n for (i; i < len; i++) {\n evtArr[i].fn.apply(evtArr[i].ctx, data);\n }\n\n return this;\n },\n\n off: function (name, callback) {\n var e = this.e || (this.e = {});\n var evts = e[name];\n var liveEvents = [];\n\n if (evts && callback) {\n for (var i = 0, len = evts.length; i < len; i++) {\n if (evts[i].fn !== callback && evts[i].fn._ !== callback)\n liveEvents.push(evts[i]);\n }\n }\n\n // Remove event from queue to prevent memory leak\n // Suggested by https://github.com/lazd\n // Ref: https://github.com/scottcorgan/tiny-emitter/commit/c6ebfaa9bc973b33d110a84a307742b7cf94c953#commitcomment-5024910\n\n (liveEvents.length)\n ? 
e[name] = liveEvents\n : delete e[name];\n\n return this;\n }\n};\n\nmodule.exports = E;\nmodule.exports.TinyEmitter = E;\n\n\n/***/ })\n\n/******/ \t});\n/************************************************************************/\n/******/ \t// The module cache\n/******/ \tvar __webpack_module_cache__ = {};\n/******/ \t\n/******/ \t// The require function\n/******/ \tfunction __webpack_require__(moduleId) {\n/******/ \t\t// Check if module is in cache\n/******/ \t\tif(__webpack_module_cache__[moduleId]) {\n/******/ \t\t\treturn __webpack_module_cache__[moduleId].exports;\n/******/ \t\t}\n/******/ \t\t// Create a new module (and put it into the cache)\n/******/ \t\tvar module = __webpack_module_cache__[moduleId] = {\n/******/ \t\t\t// no module.id needed\n/******/ \t\t\t// no module.loaded needed\n/******/ \t\t\texports: {}\n/******/ \t\t};\n/******/ \t\n/******/ \t\t// Execute the module function\n/******/ \t\t__webpack_modules__[moduleId](module, module.exports, __webpack_require__);\n/******/ \t\n/******/ \t\t// Return the exports of the module\n/******/ \t\treturn module.exports;\n/******/ \t}\n/******/ \t\n/************************************************************************/\n/******/ \t/* webpack/runtime/compat get default export */\n/******/ \t!function() {\n/******/ \t\t// getDefaultExport function for compatibility with non-harmony modules\n/******/ \t\t__webpack_require__.n = function(module) {\n/******/ \t\t\tvar getter = module && module.__esModule ?\n/******/ \t\t\t\tfunction() { return module['default']; } :\n/******/ \t\t\t\tfunction() { return module; };\n/******/ \t\t\t__webpack_require__.d(getter, { a: getter });\n/******/ \t\t\treturn getter;\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/define property getters */\n/******/ \t!function() {\n/******/ \t\t// define getter functions for harmony exports\n/******/ \t\t__webpack_require__.d = function(exports, definition) {\n/******/ \t\t\tfor(var key in definition) {\n/******/ \t\t\t\tif(__webpack_require__.o(definition, key) && !__webpack_require__.o(exports, key)) {\n/******/ \t\t\t\t\tObject.defineProperty(exports, key, { enumerable: true, get: definition[key] });\n/******/ \t\t\t\t}\n/******/ \t\t\t}\n/******/ \t\t};\n/******/ \t}();\n/******/ \t\n/******/ \t/* webpack/runtime/hasOwnProperty shorthand */\n/******/ \t!function() {\n/******/ \t\t__webpack_require__.o = function(obj, prop) { return Object.prototype.hasOwnProperty.call(obj, prop); }\n/******/ \t}();\n/******/ \t\n/************************************************************************/\n/******/ \t// module exports must be returned from runtime so entry inlining is disabled\n/******/ \t// startup\n/******/ \t// Load entry module and return exports\n/******/ \treturn __webpack_require__(686);\n/******/ })()\n.default;\n});", "/*\n * Copyright (c) 2016-2024 Martin Donath \n *\n * Permission is hereby granted, free of charge, to any person obtaining a copy\n * of this software and associated documentation files (the \"Software\"), to\n * deal in the Software without restriction, including without limitation the\n * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n * sell copies of the Software, and to permit persons to whom the Software is\n * furnished to do so, subject to the following conditions:\n *\n * The above copyright notice and this permission notice shall be included in\n * all copies or substantial portions of the Software.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF 
ANY KIND, EXPRESS OR\n * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL THE\n * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n * IN THE SOFTWARE.\n */\n\nimport \"focus-visible\"\n\nimport {\n EMPTY,\n NEVER,\n Observable,\n Subject,\n defer,\n delay,\n filter,\n map,\n merge,\n mergeWith,\n shareReplay,\n switchMap\n} from \"rxjs\"\n\nimport { configuration, feature } from \"./_\"\nimport {\n at,\n getActiveElement,\n getOptionalElement,\n requestJSON,\n setLocation,\n setToggle,\n watchDocument,\n watchKeyboard,\n watchLocation,\n watchLocationTarget,\n watchMedia,\n watchPrint,\n watchScript,\n watchViewport\n} from \"./browser\"\nimport {\n getComponentElement,\n getComponentElements,\n mountAnnounce,\n mountBackToTop,\n mountConsent,\n mountContent,\n mountDialog,\n mountHeader,\n mountHeaderTitle,\n mountPalette,\n mountProgress,\n mountSearch,\n mountSearchHiglight,\n mountSidebar,\n mountSource,\n mountTableOfContents,\n mountTabs,\n watchHeader,\n watchMain\n} from \"./components\"\nimport {\n SearchIndex,\n setupClipboardJS,\n setupInstantNavigation,\n setupVersionSelector\n} from \"./integrations\"\nimport {\n patchEllipsis,\n patchIndeterminate,\n patchScrollfix,\n patchScrolllock\n} from \"./patches\"\nimport \"./polyfills\"\n\n/* ----------------------------------------------------------------------------\n * Functions - @todo refactor\n * ------------------------------------------------------------------------- */\n\n/**\n * Fetch search index\n *\n * @returns Search index observable\n */\nfunction fetchSearchIndex(): Observable {\n if (location.protocol === \"file:\") {\n return watchScript(\n `${new URL(\"search/search_index.js\", config.base)}`\n )\n .pipe(\n // @ts-ignore - @todo fix typings\n map(() => __index),\n shareReplay(1)\n )\n } else {\n return requestJSON(\n new URL(\"search/search_index.json\", config.base)\n )\n }\n}\n\n/* ----------------------------------------------------------------------------\n * Application\n * ------------------------------------------------------------------------- */\n\n/* Yay, JavaScript is available */\ndocument.documentElement.classList.remove(\"no-js\")\ndocument.documentElement.classList.add(\"js\")\n\n/* Set up navigation observables and subjects */\nconst document$ = watchDocument()\nconst location$ = watchLocation()\nconst target$ = watchLocationTarget(location$)\nconst keyboard$ = watchKeyboard()\n\n/* Set up media observables */\nconst viewport$ = watchViewport()\nconst tablet$ = watchMedia(\"(min-width: 960px)\")\nconst screen$ = watchMedia(\"(min-width: 1220px)\")\nconst print$ = watchPrint()\n\n/* Retrieve search index, if search is enabled */\nconst config = configuration()\nconst index$ = document.forms.namedItem(\"search\")\n ? 
fetchSearchIndex()\n : NEVER\n\n/* Set up Clipboard.js integration */\nconst alert$ = new Subject()\nsetupClipboardJS({ alert$ })\n\n/* Set up progress indicator */\nconst progress$ = new Subject()\n\n/* Set up instant navigation, if enabled */\nif (feature(\"navigation.instant\"))\n setupInstantNavigation({ location$, viewport$, progress$ })\n .subscribe(document$)\n\n/* Set up version selector */\nif (config.version?.provider === \"mike\")\n setupVersionSelector({ document$ })\n\n/* Always close drawer and search on navigation */\nmerge(location$, target$)\n .pipe(\n delay(125)\n )\n .subscribe(() => {\n setToggle(\"drawer\", false)\n setToggle(\"search\", false)\n })\n\n/* Set up global keyboard handlers */\nkeyboard$\n .pipe(\n filter(({ mode }) => mode === \"global\")\n )\n .subscribe(key => {\n switch (key.type) {\n\n /* Go to previous page */\n case \"p\":\n case \",\":\n const prev = getOptionalElement(\"link[rel=prev]\")\n if (typeof prev !== \"undefined\")\n setLocation(prev)\n break\n\n /* Go to next page */\n case \"n\":\n case \".\":\n const next = getOptionalElement(\"link[rel=next]\")\n if (typeof next !== \"undefined\")\n setLocation(next)\n break\n\n /* Expand navigation, see https://bit.ly/3ZjG5io */\n case \"Enter\":\n const active = getActiveElement()\n if (active instanceof HTMLLabelElement)\n active.click()\n }\n })\n\n/* Set up patches */\npatchEllipsis({ viewport$, document$ })\npatchIndeterminate({ document$, tablet$ })\npatchScrollfix({ document$ })\npatchScrolllock({ viewport$, tablet$ })\n\n/* Set up header and main area observable */\nconst header$ = watchHeader(getComponentElement(\"header\"), { viewport$ })\nconst main$ = document$\n .pipe(\n map(() => getComponentElement(\"main\")),\n switchMap(el => watchMain(el, { viewport$, header$ })),\n shareReplay(1)\n )\n\n/* Set up control component observables */\nconst control$ = merge(\n\n /* Consent */\n ...getComponentElements(\"consent\")\n .map(el => mountConsent(el, { target$ })),\n\n /* Dialog */\n ...getComponentElements(\"dialog\")\n .map(el => mountDialog(el, { alert$ })),\n\n /* Color palette */\n ...getComponentElements(\"palette\")\n .map(el => mountPalette(el)),\n\n /* Progress bar */\n ...getComponentElements(\"progress\")\n .map(el => mountProgress(el, { progress$ })),\n\n /* Search */\n ...getComponentElements(\"search\")\n .map(el => mountSearch(el, { index$, keyboard$ })),\n\n /* Repository information */\n ...getComponentElements(\"source\")\n .map(el => mountSource(el))\n)\n\n/* Set up content component observables */\nconst content$ = defer(() => merge(\n\n /* Announcement bar */\n ...getComponentElements(\"announce\")\n .map(el => mountAnnounce(el)),\n\n /* Content */\n ...getComponentElements(\"content\")\n .map(el => mountContent(el, { viewport$, target$, print$ })),\n\n /* Search highlighting */\n ...getComponentElements(\"content\")\n .map(el => feature(\"search.highlight\")\n ? mountSearchHiglight(el, { index$, location$ })\n : EMPTY\n ),\n\n /* Header */\n ...getComponentElements(\"header\")\n .map(el => mountHeader(el, { viewport$, header$, main$ })),\n\n /* Header title */\n ...getComponentElements(\"header-title\")\n .map(el => mountHeaderTitle(el, { viewport$, header$ })),\n\n /* Sidebar */\n ...getComponentElements(\"sidebar\")\n .map(el => el.getAttribute(\"data-md-type\") === \"navigation\"\n ? 
at(screen$, () => mountSidebar(el, { viewport$, header$, main$ }))\n : at(tablet$, () => mountSidebar(el, { viewport$, header$, main$ }))\n ),\n\n /* Navigation tabs */\n ...getComponentElements(\"tabs\")\n .map(el => mountTabs(el, { viewport$, header$ })),\n\n /* Table of contents */\n ...getComponentElements(\"toc\")\n .map(el => mountTableOfContents(el, {\n viewport$, header$, main$, target$\n })),\n\n /* Back-to-top button */\n ...getComponentElements(\"top\")\n .map(el => mountBackToTop(el, { viewport$, header$, main$, target$ }))\n))\n\n/* Set up component observables */\nconst component$ = document$\n .pipe(\n switchMap(() => content$),\n mergeWith(control$),\n shareReplay(1)\n )\n\n/* Subscribe to all components */\ncomponent$.subscribe()\n\n/* ----------------------------------------------------------------------------\n * Exports\n * ------------------------------------------------------------------------- */\n\nwindow.document$ = document$ /* Document observable */\nwindow.location$ = location$ /* Location subject */\nwindow.target$ = target$ /* Location target observable */\nwindow.keyboard$ = keyboard$ /* Keyboard observable */\nwindow.viewport$ = viewport$ /* Viewport observable */\nwindow.tablet$ = tablet$ /* Media tablet observable */\nwindow.screen$ = screen$ /* Media screen observable */\nwindow.print$ = print$ /* Media print observable */\nwindow.alert$ = alert$ /* Alert subject */\nwindow.progress$ = progress$ /* Progress indicator subject */\nwindow.component$ = component$ /* Component observable */\n", "/******************************************************************************\nCopyright (c) Microsoft Corporation.\n\nPermission to use, copy, modify, and/or distribute this software for any\npurpose with or without fee is hereby granted.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH\nREGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\nAND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,\nINDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\nLOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR\nOTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\nPERFORMANCE OF THIS SOFTWARE.\n***************************************************************************** */\n/* global Reflect, Promise, SuppressedError, Symbol, Iterator */\n\nvar extendStatics = function(d, b) {\n extendStatics = Object.setPrototypeOf ||\n ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) ||\n function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; };\n return extendStatics(d, b);\n};\n\nexport function __extends(d, b) {\n if (typeof b !== \"function\" && b !== null)\n throw new TypeError(\"Class extends value \" + String(b) + \" is not a constructor or null\");\n extendStatics(d, b);\n function __() { this.constructor = d; }\n d.prototype = b === null ? 
Object.create(b) : (__.prototype = b.prototype, new __());\n}\n\nexport var __assign = function() {\n __assign = Object.assign || function __assign(t) {\n for (var s, i = 1, n = arguments.length; i < n; i++) {\n s = arguments[i];\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p];\n }\n return t;\n }\n return __assign.apply(this, arguments);\n}\n\nexport function __rest(s, e) {\n var t = {};\n for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0)\n t[p] = s[p];\n if (s != null && typeof Object.getOwnPropertySymbols === \"function\")\n for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) {\n if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i]))\n t[p[i]] = s[p[i]];\n }\n return t;\n}\n\nexport function __decorate(decorators, target, key, desc) {\n var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;\n if (typeof Reflect === \"object\" && typeof Reflect.decorate === \"function\") r = Reflect.decorate(decorators, target, key, desc);\n else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;\n return c > 3 && r && Object.defineProperty(target, key, r), r;\n}\n\nexport function __param(paramIndex, decorator) {\n return function (target, key) { decorator(target, key, paramIndex); }\n}\n\nexport function __esDecorate(ctor, descriptorIn, decorators, contextIn, initializers, extraInitializers) {\n function accept(f) { if (f !== void 0 && typeof f !== \"function\") throw new TypeError(\"Function expected\"); return f; }\n var kind = contextIn.kind, key = kind === \"getter\" ? \"get\" : kind === \"setter\" ? \"set\" : \"value\";\n var target = !descriptorIn && ctor ? contextIn[\"static\"] ? ctor : ctor.prototype : null;\n var descriptor = descriptorIn || (target ? Object.getOwnPropertyDescriptor(target, contextIn.name) : {});\n var _, done = false;\n for (var i = decorators.length - 1; i >= 0; i--) {\n var context = {};\n for (var p in contextIn) context[p] = p === \"access\" ? {} : contextIn[p];\n for (var p in contextIn.access) context.access[p] = contextIn.access[p];\n context.addInitializer = function (f) { if (done) throw new TypeError(\"Cannot add initializers after decoration has completed\"); extraInitializers.push(accept(f || null)); };\n var result = (0, decorators[i])(kind === \"accessor\" ? { get: descriptor.get, set: descriptor.set } : descriptor[key], context);\n if (kind === \"accessor\") {\n if (result === void 0) continue;\n if (result === null || typeof result !== \"object\") throw new TypeError(\"Object expected\");\n if (_ = accept(result.get)) descriptor.get = _;\n if (_ = accept(result.set)) descriptor.set = _;\n if (_ = accept(result.init)) initializers.unshift(_);\n }\n else if (_ = accept(result)) {\n if (kind === \"field\") initializers.unshift(_);\n else descriptor[key] = _;\n }\n }\n if (target) Object.defineProperty(target, contextIn.name, descriptor);\n done = true;\n};\n\nexport function __runInitializers(thisArg, initializers, value) {\n var useValue = arguments.length > 2;\n for (var i = 0; i < initializers.length; i++) {\n value = useValue ? initializers[i].call(thisArg, value) : initializers[i].call(thisArg);\n }\n return useValue ? value : void 0;\n};\n\nexport function __propKey(x) {\n return typeof x === \"symbol\" ? 
x : \"\".concat(x);\n};\n\nexport function __setFunctionName(f, name, prefix) {\n if (typeof name === \"symbol\") name = name.description ? \"[\".concat(name.description, \"]\") : \"\";\n return Object.defineProperty(f, \"name\", { configurable: true, value: prefix ? \"\".concat(prefix, \" \", name) : name });\n};\n\nexport function __metadata(metadataKey, metadataValue) {\n if (typeof Reflect === \"object\" && typeof Reflect.metadata === \"function\") return Reflect.metadata(metadataKey, metadataValue);\n}\n\nexport function __awaiter(thisArg, _arguments, P, generator) {\n function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }\n return new (P || (P = Promise))(function (resolve, reject) {\n function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }\n function rejected(value) { try { step(generator[\"throw\"](value)); } catch (e) { reject(e); } }\n function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }\n step((generator = generator.apply(thisArg, _arguments || [])).next());\n });\n}\n\nexport function __generator(thisArg, body) {\n var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g = Object.create((typeof Iterator === \"function\" ? Iterator : Object).prototype);\n return g.next = verb(0), g[\"throw\"] = verb(1), g[\"return\"] = verb(2), typeof Symbol === \"function\" && (g[Symbol.iterator] = function() { return this; }), g;\n function verb(n) { return function (v) { return step([n, v]); }; }\n function step(op) {\n if (f) throw new TypeError(\"Generator is already executing.\");\n while (g && (g = 0, op[0] && (_ = 0)), _) try {\n if (f = 1, y && (t = op[0] & 2 ? y[\"return\"] : op[0] ? y[\"throw\"] || ((t = y[\"return\"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t;\n if (y = 0, t) op = [op[0] & 2, t.value];\n switch (op[0]) {\n case 0: case 1: t = op; break;\n case 4: _.label++; return { value: op[1], done: false };\n case 5: _.label++; y = op[1]; op = [0]; continue;\n case 7: op = _.ops.pop(); _.trys.pop(); continue;\n default:\n if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; }\n if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; }\n if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; }\n if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; }\n if (t[2]) _.ops.pop();\n _.trys.pop(); continue;\n }\n op = body.call(thisArg, _);\n } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; }\n if (op[0] & 5) throw op[1]; return { value: op[0] ? op[1] : void 0, done: true };\n }\n}\n\nexport var __createBinding = Object.create ? (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n var desc = Object.getOwnPropertyDescriptor(m, k);\n if (!desc || (\"get\" in desc ? 
!m.__esModule : desc.writable || desc.configurable)) {\n desc = { enumerable: true, get: function() { return m[k]; } };\n }\n Object.defineProperty(o, k2, desc);\n}) : (function(o, m, k, k2) {\n if (k2 === undefined) k2 = k;\n o[k2] = m[k];\n});\n\nexport function __exportStar(m, o) {\n for (var p in m) if (p !== \"default\" && !Object.prototype.hasOwnProperty.call(o, p)) __createBinding(o, m, p);\n}\n\nexport function __values(o) {\n var s = typeof Symbol === \"function\" && Symbol.iterator, m = s && o[s], i = 0;\n if (m) return m.call(o);\n if (o && typeof o.length === \"number\") return {\n next: function () {\n if (o && i >= o.length) o = void 0;\n return { value: o && o[i++], done: !o };\n }\n };\n throw new TypeError(s ? \"Object is not iterable.\" : \"Symbol.iterator is not defined.\");\n}\n\nexport function __read(o, n) {\n var m = typeof Symbol === \"function\" && o[Symbol.iterator];\n if (!m) return o;\n var i = m.call(o), r, ar = [], e;\n try {\n while ((n === void 0 || n-- > 0) && !(r = i.next()).done) ar.push(r.value);\n }\n catch (error) { e = { error: error }; }\n finally {\n try {\n if (r && !r.done && (m = i[\"return\"])) m.call(i);\n }\n finally { if (e) throw e.error; }\n }\n return ar;\n}\n\n/** @deprecated */\nexport function __spread() {\n for (var ar = [], i = 0; i < arguments.length; i++)\n ar = ar.concat(__read(arguments[i]));\n return ar;\n}\n\n/** @deprecated */\nexport function __spreadArrays() {\n for (var s = 0, i = 0, il = arguments.length; i < il; i++) s += arguments[i].length;\n for (var r = Array(s), k = 0, i = 0; i < il; i++)\n for (var a = arguments[i], j = 0, jl = a.length; j < jl; j++, k++)\n r[k] = a[j];\n return r;\n}\n\nexport function __spreadArray(to, from, pack) {\n if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) {\n if (ar || !(i in from)) {\n if (!ar) ar = Array.prototype.slice.call(from, 0, i);\n ar[i] = from[i];\n }\n }\n return to.concat(ar || Array.prototype.slice.call(from));\n}\n\nexport function __await(v) {\n return this instanceof __await ? (this.v = v, this) : new __await(v);\n}\n\nexport function __asyncGenerator(thisArg, _arguments, generator) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var g = generator.apply(thisArg, _arguments || []), i, q = [];\n return i = Object.create((typeof AsyncIterator === \"function\" ? AsyncIterator : Object).prototype), verb(\"next\"), verb(\"throw\"), verb(\"return\", awaitReturn), i[Symbol.asyncIterator] = function () { return this; }, i;\n function awaitReturn(f) { return function (v) { return Promise.resolve(v).then(f, reject); }; }\n function verb(n, f) { if (g[n]) { i[n] = function (v) { return new Promise(function (a, b) { q.push([n, v, a, b]) > 1 || resume(n, v); }); }; if (f) i[n] = f(i[n]); } }\n function resume(n, v) { try { step(g[n](v)); } catch (e) { settle(q[0][3], e); } }\n function step(r) { r.value instanceof __await ? Promise.resolve(r.value.v).then(fulfill, reject) : settle(q[0][2], r); }\n function fulfill(value) { resume(\"next\", value); }\n function reject(value) { resume(\"throw\", value); }\n function settle(f, v) { if (f(v), q.shift(), q.length) resume(q[0][0], q[0][1]); }\n}\n\nexport function __asyncDelegator(o) {\n var i, p;\n return i = {}, verb(\"next\"), verb(\"throw\", function (e) { throw e; }), verb(\"return\"), i[Symbol.iterator] = function () { return this; }, i;\n function verb(n, f) { i[n] = o[n] ? function (v) { return (p = !p) ? 
{ value: __await(o[n](v)), done: false } : f ? f(v) : v; } : f; }\n}\n\nexport function __asyncValues(o) {\n if (!Symbol.asyncIterator) throw new TypeError(\"Symbol.asyncIterator is not defined.\");\n var m = o[Symbol.asyncIterator], i;\n return m ? m.call(o) : (o = typeof __values === \"function\" ? __values(o) : o[Symbol.iterator](), i = {}, verb(\"next\"), verb(\"throw\"), verb(\"return\"), i[Symbol.asyncIterator] = function () { return this; }, i);\n function verb(n) { i[n] = o[n] && function (v) { return new Promise(function (resolve, reject) { v = o[n](v), settle(resolve, reject, v.done, v.value); }); }; }\n function settle(resolve, reject, d, v) { Promise.resolve(v).then(function(v) { resolve({ value: v, done: d }); }, reject); }\n}\n\nexport function __makeTemplateObject(cooked, raw) {\n if (Object.defineProperty) { Object.defineProperty(cooked, \"raw\", { value: raw }); } else { cooked.raw = raw; }\n return cooked;\n};\n\nvar __setModuleDefault = Object.create ? (function(o, v) {\n Object.defineProperty(o, \"default\", { enumerable: true, value: v });\n}) : function(o, v) {\n o[\"default\"] = v;\n};\n\nexport function __importStar(mod) {\n if (mod && mod.__esModule) return mod;\n var result = {};\n if (mod != null) for (var k in mod) if (k !== \"default\" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);\n __setModuleDefault(result, mod);\n return result;\n}\n\nexport function __importDefault(mod) {\n return (mod && mod.__esModule) ? mod : { default: mod };\n}\n\nexport function __classPrivateFieldGet(receiver, state, kind, f) {\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a getter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot read private member from an object whose class did not declare it\");\n return kind === \"m\" ? f : kind === \"a\" ? f.call(receiver) : f ? f.value : state.get(receiver);\n}\n\nexport function __classPrivateFieldSet(receiver, state, value, kind, f) {\n if (kind === \"m\") throw new TypeError(\"Private method is not writable\");\n if (kind === \"a\" && !f) throw new TypeError(\"Private accessor was defined without a setter\");\n if (typeof state === \"function\" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError(\"Cannot write private member to an object whose class did not declare it\");\n return (kind === \"a\" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;\n}\n\nexport function __classPrivateFieldIn(state, receiver) {\n if (receiver === null || (typeof receiver !== \"object\" && typeof receiver !== \"function\")) throw new TypeError(\"Cannot use 'in' operator on non-object\");\n return typeof state === \"function\" ? 
receiver === state : state.has(receiver);\n}\n\nexport function __addDisposableResource(env, value, async) {\n if (value !== null && value !== void 0) {\n if (typeof value !== \"object\" && typeof value !== \"function\") throw new TypeError(\"Object expected.\");\n var dispose, inner;\n if (async) {\n if (!Symbol.asyncDispose) throw new TypeError(\"Symbol.asyncDispose is not defined.\");\n dispose = value[Symbol.asyncDispose];\n }\n if (dispose === void 0) {\n if (!Symbol.dispose) throw new TypeError(\"Symbol.dispose is not defined.\");\n dispose = value[Symbol.dispose];\n if (async) inner = dispose;\n }\n if (typeof dispose !== \"function\") throw new TypeError(\"Object not disposable.\");\n if (inner) dispose = function() { try { inner.call(this); } catch (e) { return Promise.reject(e); } };\n env.stack.push({ value: value, dispose: dispose, async: async });\n }\n else if (async) {\n env.stack.push({ async: true });\n }\n return value;\n}\n\nvar _SuppressedError = typeof SuppressedError === \"function\" ? SuppressedError : function (error, suppressed, message) {\n var e = new Error(message);\n return e.name = \"SuppressedError\", e.error = error, e.suppressed = suppressed, e;\n};\n\nexport function __disposeResources(env) {\n function fail(e) {\n env.error = env.hasError ? new _SuppressedError(e, env.error, \"An error was suppressed during disposal.\") : e;\n env.hasError = true;\n }\n var r, s = 0;\n function next() {\n while (r = env.stack.pop()) {\n try {\n if (!r.async && s === 1) return s = 0, env.stack.push(r), Promise.resolve().then(next);\n if (r.dispose) {\n var result = r.dispose.call(r.value);\n if (r.async) return s |= 2, Promise.resolve(result).then(next, function(e) { fail(e); return next(); });\n }\n else s |= 1;\n }\n catch (e) {\n fail(e);\n }\n }\n if (s === 1) return env.hasError ? Promise.reject(env.error) : Promise.resolve();\n if (env.hasError) throw env.error;\n }\n return next();\n}\n\nexport default {\n __extends,\n __assign,\n __rest,\n __decorate,\n __param,\n __metadata,\n __awaiter,\n __generator,\n __createBinding,\n __exportStar,\n __values,\n __read,\n __spread,\n __spreadArrays,\n __spreadArray,\n __await,\n __asyncGenerator,\n __asyncDelegator,\n __asyncValues,\n __makeTemplateObject,\n __importStar,\n __importDefault,\n __classPrivateFieldGet,\n __classPrivateFieldSet,\n __classPrivateFieldIn,\n __addDisposableResource,\n __disposeResources,\n};\n", "/**\n * Returns true if the object is a function.\n * @param value The value to check\n */\nexport function isFunction(value: any): value is (...args: any[]) => any {\n return typeof value === 'function';\n}\n", "/**\n * Used to create Error subclasses until the community moves away from ES5.\n *\n * This is because compiling from TypeScript down to ES5 has issues with subclassing Errors\n * as well as other built-in types: https://github.com/Microsoft/TypeScript/issues/12123\n *\n * @param createImpl A factory function to create the actual constructor implementation. 
The returned\n * function should be a named function that calls `_super` internally.\n */\nexport function createErrorClass(createImpl: (_super: any) => any): T {\n const _super = (instance: any) => {\n Error.call(instance);\n instance.stack = new Error().stack;\n };\n\n const ctorFunc = createImpl(_super);\n ctorFunc.prototype = Object.create(Error.prototype);\n ctorFunc.prototype.constructor = ctorFunc;\n return ctorFunc;\n}\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface UnsubscriptionError extends Error {\n readonly errors: any[];\n}\n\nexport interface UnsubscriptionErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (errors: any[]): UnsubscriptionError;\n}\n\n/**\n * An error thrown when one or more errors have occurred during the\n * `unsubscribe` of a {@link Subscription}.\n */\nexport const UnsubscriptionError: UnsubscriptionErrorCtor = createErrorClass(\n (_super) =>\n function UnsubscriptionErrorImpl(this: any, errors: (Error | string)[]) {\n _super(this);\n this.message = errors\n ? `${errors.length} errors occurred during unsubscription:\n${errors.map((err, i) => `${i + 1}) ${err.toString()}`).join('\\n ')}`\n : '';\n this.name = 'UnsubscriptionError';\n this.errors = errors;\n }\n);\n", "/**\n * Removes an item from an array, mutating it.\n * @param arr The array to remove the item from\n * @param item The item to remove\n */\nexport function arrRemove(arr: T[] | undefined | null, item: T) {\n if (arr) {\n const index = arr.indexOf(item);\n 0 <= index && arr.splice(index, 1);\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { UnsubscriptionError } from './util/UnsubscriptionError';\nimport { SubscriptionLike, TeardownLogic, Unsubscribable } from './types';\nimport { arrRemove } from './util/arrRemove';\n\n/**\n * Represents a disposable resource, such as the execution of an Observable. A\n * Subscription has one important method, `unsubscribe`, that takes no argument\n * and just disposes the resource held by the subscription.\n *\n * Additionally, subscriptions may be grouped together through the `add()`\n * method, which will attach a child Subscription to the current Subscription.\n * When a Subscription is unsubscribed, all its children (and its grandchildren)\n * will be unsubscribed as well.\n *\n * @class Subscription\n */\nexport class Subscription implements SubscriptionLike {\n /** @nocollapse */\n public static EMPTY = (() => {\n const empty = new Subscription();\n empty.closed = true;\n return empty;\n })();\n\n /**\n * A flag to indicate whether this Subscription has already been unsubscribed.\n */\n public closed = false;\n\n private _parentage: Subscription[] | Subscription | null = null;\n\n /**\n * The list of registered finalizers to execute upon unsubscription. Adding and removing from this\n * list occurs in the {@link #add} and {@link #remove} methods.\n */\n private _finalizers: Exclude[] | null = null;\n\n /**\n * @param initialTeardown A function executed first as part of the finalization\n * process that is kicked off when {@link #unsubscribe} is called.\n */\n constructor(private initialTeardown?: () => void) {}\n\n /**\n * Disposes the resources held by the subscription. 
May, for instance, cancel\n * an ongoing Observable execution or cancel any other type of work that\n * started when the Subscription was created.\n * @return {void}\n */\n unsubscribe(): void {\n let errors: any[] | undefined;\n\n if (!this.closed) {\n this.closed = true;\n\n // Remove this from it's parents.\n const { _parentage } = this;\n if (_parentage) {\n this._parentage = null;\n if (Array.isArray(_parentage)) {\n for (const parent of _parentage) {\n parent.remove(this);\n }\n } else {\n _parentage.remove(this);\n }\n }\n\n const { initialTeardown: initialFinalizer } = this;\n if (isFunction(initialFinalizer)) {\n try {\n initialFinalizer();\n } catch (e) {\n errors = e instanceof UnsubscriptionError ? e.errors : [e];\n }\n }\n\n const { _finalizers } = this;\n if (_finalizers) {\n this._finalizers = null;\n for (const finalizer of _finalizers) {\n try {\n execFinalizer(finalizer);\n } catch (err) {\n errors = errors ?? [];\n if (err instanceof UnsubscriptionError) {\n errors = [...errors, ...err.errors];\n } else {\n errors.push(err);\n }\n }\n }\n }\n\n if (errors) {\n throw new UnsubscriptionError(errors);\n }\n }\n }\n\n /**\n * Adds a finalizer to this subscription, so that finalization will be unsubscribed/called\n * when this subscription is unsubscribed. If this subscription is already {@link #closed},\n * because it has already been unsubscribed, then whatever finalizer is passed to it\n * will automatically be executed (unless the finalizer itself is also a closed subscription).\n *\n * Closed Subscriptions cannot be added as finalizers to any subscription. Adding a closed\n * subscription to a any subscription will result in no operation. (A noop).\n *\n * Adding a subscription to itself, or adding `null` or `undefined` will not perform any\n * operation at all. (A noop).\n *\n * `Subscription` instances that are added to this instance will automatically remove themselves\n * if they are unsubscribed. Functions and {@link Unsubscribable} objects that you wish to remove\n * will need to be removed manually with {@link #remove}\n *\n * @param teardown The finalization logic to add to this subscription.\n */\n add(teardown: TeardownLogic): void {\n // Only add the finalizer if it's not undefined\n // and don't add a subscription to itself.\n if (teardown && teardown !== this) {\n if (this.closed) {\n // If this subscription is already closed,\n // execute whatever finalizer is handed to it automatically.\n execFinalizer(teardown);\n } else {\n if (teardown instanceof Subscription) {\n // We don't add closed subscriptions, and we don't add the same subscription\n // twice. Subscription unsubscribe is idempotent.\n if (teardown.closed || teardown._hasParent(this)) {\n return;\n }\n teardown._addParent(this);\n }\n (this._finalizers = this._finalizers ?? 
[]).push(teardown);\n }\n }\n }\n\n /**\n * Checks to see if a this subscription already has a particular parent.\n * This will signal that this subscription has already been added to the parent in question.\n * @param parent the parent to check for\n */\n private _hasParent(parent: Subscription) {\n const { _parentage } = this;\n return _parentage === parent || (Array.isArray(_parentage) && _parentage.includes(parent));\n }\n\n /**\n * Adds a parent to this subscription so it can be removed from the parent if it\n * unsubscribes on it's own.\n *\n * NOTE: THIS ASSUMES THAT {@link _hasParent} HAS ALREADY BEEN CHECKED.\n * @param parent The parent subscription to add\n */\n private _addParent(parent: Subscription) {\n const { _parentage } = this;\n this._parentage = Array.isArray(_parentage) ? (_parentage.push(parent), _parentage) : _parentage ? [_parentage, parent] : parent;\n }\n\n /**\n * Called on a child when it is removed via {@link #remove}.\n * @param parent The parent to remove\n */\n private _removeParent(parent: Subscription) {\n const { _parentage } = this;\n if (_parentage === parent) {\n this._parentage = null;\n } else if (Array.isArray(_parentage)) {\n arrRemove(_parentage, parent);\n }\n }\n\n /**\n * Removes a finalizer from this subscription that was previously added with the {@link #add} method.\n *\n * Note that `Subscription` instances, when unsubscribed, will automatically remove themselves\n * from every other `Subscription` they have been added to. This means that using the `remove` method\n * is not a common thing and should be used thoughtfully.\n *\n * If you add the same finalizer instance of a function or an unsubscribable object to a `Subscription` instance\n * more than once, you will need to call `remove` the same number of times to remove all instances.\n *\n * All finalizer instances are removed to free up memory upon unsubscription.\n *\n * @param teardown The finalizer to remove from this subscription\n */\n remove(teardown: Exclude): void {\n const { _finalizers } = this;\n _finalizers && arrRemove(_finalizers, teardown);\n\n if (teardown instanceof Subscription) {\n teardown._removeParent(this);\n }\n }\n}\n\nexport const EMPTY_SUBSCRIPTION = Subscription.EMPTY;\n\nexport function isSubscription(value: any): value is Subscription {\n return (\n value instanceof Subscription ||\n (value && 'closed' in value && isFunction(value.remove) && isFunction(value.add) && isFunction(value.unsubscribe))\n );\n}\n\nfunction execFinalizer(finalizer: Unsubscribable | (() => void)) {\n if (isFunction(finalizer)) {\n finalizer();\n } else {\n finalizer.unsubscribe();\n }\n}\n", "import { Subscriber } from './Subscriber';\nimport { ObservableNotification } from './types';\n\n/**\n * The {@link GlobalConfig} object for RxJS. It is used to configure things\n * like how to react on unhandled errors.\n */\nexport const config: GlobalConfig = {\n onUnhandledError: null,\n onStoppedNotification: null,\n Promise: undefined,\n useDeprecatedSynchronousErrorHandling: false,\n useDeprecatedNextContext: false,\n};\n\n/**\n * The global configuration object for RxJS, used to configure things\n * like how to react on unhandled errors. Accessible via {@link config}\n * object.\n */\nexport interface GlobalConfig {\n /**\n * A registration point for unhandled errors from RxJS. These are errors that\n * cannot were not handled by consuming code in the usual subscription path. 
For\n * example, if you have this configured, and you subscribe to an observable without\n * providing an error handler, errors from that subscription will end up here. This\n * will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onUnhandledError: ((err: any) => void) | null;\n\n /**\n * A registration point for notifications that cannot be sent to subscribers because they\n * have completed, errored or have been explicitly unsubscribed. By default, next, complete\n * and error notifications sent to stopped subscribers are noops. However, sometimes callers\n * might want a different behavior. For example, with sources that attempt to report errors\n * to stopped subscribers, a caller can configure RxJS to throw an unhandled error instead.\n * This will _always_ be called asynchronously on another job in the runtime. This is because\n * we do not want errors thrown in this user-configured handler to interfere with the\n * behavior of the library.\n */\n onStoppedNotification: ((notification: ObservableNotification, subscriber: Subscriber) => void) | null;\n\n /**\n * The promise constructor used by default for {@link Observable#toPromise toPromise} and {@link Observable#forEach forEach}\n * methods.\n *\n * @deprecated As of version 8, RxJS will no longer support this sort of injection of a\n * Promise constructor. If you need a Promise implementation other than native promises,\n * please polyfill/patch Promise as you see appropriate. Will be removed in v8.\n */\n Promise?: PromiseConstructorLike;\n\n /**\n * If true, turns on synchronous error rethrowing, which is a deprecated behavior\n * in v6 and higher. This behavior enables bad patterns like wrapping a subscribe\n * call in a try/catch block. It also enables producer interference, a nasty bug\n * where a multicast can be broken for all observers by a downstream consumer with\n * an unhandled error. DO NOT USE THIS FLAG UNLESS IT'S NEEDED TO BUY TIME\n * FOR MIGRATION REASONS.\n *\n * @deprecated As of version 8, RxJS will no longer support synchronous throwing\n * of unhandled errors. All errors will be thrown on a separate call stack to prevent bad\n * behaviors described above. Will be removed in v8.\n */\n useDeprecatedSynchronousErrorHandling: boolean;\n\n /**\n * If true, enables an as-of-yet undocumented feature from v5: The ability to access\n * `unsubscribe()` via `this` context in `next` functions created in observers passed\n * to `subscribe`.\n *\n * This is being removed because the performance was severely problematic, and it could also cause\n * issues when types other than POJOs are passed to subscribe as subscribers, as they will likely have\n * their `this` context overwritten.\n *\n * @deprecated As of version 8, RxJS will no longer support altering the\n * context of next functions provided as part of an observer to Subscribe. Instead,\n * you will have access to a subscription or a signal or token that will allow you to do things like\n * unsubscribe and test closed status. 
Will be removed in v8.\n */\n useDeprecatedNextContext: boolean;\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetTimeoutFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearTimeoutFunction = (handle: TimerHandle) => void;\n\ninterface TimeoutProvider {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n delegate:\n | {\n setTimeout: SetTimeoutFunction;\n clearTimeout: ClearTimeoutFunction;\n }\n | undefined;\n}\n\nexport const timeoutProvider: TimeoutProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setTimeout(handler: () => void, timeout?: number, ...args) {\n const { delegate } = timeoutProvider;\n if (delegate?.setTimeout) {\n return delegate.setTimeout(handler, timeout, ...args);\n }\n return setTimeout(handler, timeout, ...args);\n },\n clearTimeout(handle) {\n const { delegate } = timeoutProvider;\n return (delegate?.clearTimeout || clearTimeout)(handle as any);\n },\n delegate: undefined,\n};\n", "import { config } from '../config';\nimport { timeoutProvider } from '../scheduler/timeoutProvider';\n\n/**\n * Handles an error on another job either with the user-configured {@link onUnhandledError},\n * or by throwing it on that new job so it can be picked up by `window.onerror`, `process.on('error')`, etc.\n *\n * This should be called whenever there is an error that is out-of-band with the subscription\n * or when an error hits a terminal boundary of the subscription and no error handler was provided.\n *\n * @param err the error to report\n */\nexport function reportUnhandledError(err: any) {\n timeoutProvider.setTimeout(() => {\n const { onUnhandledError } = config;\n if (onUnhandledError) {\n // Execute the user-configured error handler.\n onUnhandledError(err);\n } else {\n // Throw so it is picked up by the runtime's uncaught error mechanism.\n throw err;\n }\n });\n}\n", "/* tslint:disable:no-empty */\nexport function noop() { }\n", "import { CompleteNotification, NextNotification, ErrorNotification } from './types';\n\n/**\n * A completion object optimized for memory use and created to be the\n * same \"shape\" as other notifications in v8.\n * @internal\n */\nexport const COMPLETE_NOTIFICATION = (() => createNotification('C', undefined, undefined) as CompleteNotification)();\n\n/**\n * Internal use only. Creates an optimized error notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function errorNotification(error: any): ErrorNotification {\n return createNotification('E', undefined, error) as any;\n}\n\n/**\n * Internal use only. Creates an optimized next notification that is the same \"shape\"\n * as other notifications.\n * @internal\n */\nexport function nextNotification(value: T) {\n return createNotification('N', value, undefined) as NextNotification;\n}\n\n/**\n * Ensures that all notifications created internally have the same \"shape\" in v8.\n *\n * TODO: This is only exported to support a crazy legacy test in `groupBy`.\n * @internal\n */\nexport function createNotification(kind: 'N' | 'E' | 'C', value: any, error: any) {\n return {\n kind,\n value,\n error,\n };\n}\n", "import { config } from '../config';\n\nlet context: { errorThrown: boolean; error: any } | null = null;\n\n/**\n * Handles dealing with errors for super-gross mode. 
Creates a context, in which\n * any synchronously thrown errors will be passed to {@link captureError}. Which\n * will record the error such that it will be rethrown after the call back is complete.\n * TODO: Remove in v8\n * @param cb An immediately executed function.\n */\nexport function errorContext(cb: () => void) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n const isRoot = !context;\n if (isRoot) {\n context = { errorThrown: false, error: null };\n }\n cb();\n if (isRoot) {\n const { errorThrown, error } = context!;\n context = null;\n if (errorThrown) {\n throw error;\n }\n }\n } else {\n // This is the general non-deprecated path for everyone that\n // isn't crazy enough to use super-gross mode (useDeprecatedSynchronousErrorHandling)\n cb();\n }\n}\n\n/**\n * Captures errors only in super-gross mode.\n * @param err the error to capture\n */\nexport function captureError(err: any) {\n if (config.useDeprecatedSynchronousErrorHandling && context) {\n context.errorThrown = true;\n context.error = err;\n }\n}\n", "import { isFunction } from './util/isFunction';\nimport { Observer, ObservableNotification } from './types';\nimport { isSubscription, Subscription } from './Subscription';\nimport { config } from './config';\nimport { reportUnhandledError } from './util/reportUnhandledError';\nimport { noop } from './util/noop';\nimport { nextNotification, errorNotification, COMPLETE_NOTIFICATION } from './NotificationFactories';\nimport { timeoutProvider } from './scheduler/timeoutProvider';\nimport { captureError } from './util/errorContext';\n\n/**\n * Implements the {@link Observer} interface and extends the\n * {@link Subscription} class. While the {@link Observer} is the public API for\n * consuming the values of an {@link Observable}, all Observers get converted to\n * a Subscriber, in order to provide Subscription-like capabilities such as\n * `unsubscribe`. Subscriber is a common type in RxJS, and crucial for\n * implementing operators, but it is rarely used as a public API.\n *\n * @class Subscriber\n */\nexport class Subscriber extends Subscription implements Observer {\n /**\n * A static factory for a Subscriber, given a (potentially partial) definition\n * of an Observer.\n * @param next The `next` callback of an Observer.\n * @param error The `error` callback of an\n * Observer.\n * @param complete The `complete` callback of an\n * Observer.\n * @return A Subscriber wrapping the (partially defined)\n * Observer represented by the given arguments.\n * @nocollapse\n * @deprecated Do not use. Will be removed in v8. There is no replacement for this\n * method, and there is no reason to be creating instances of `Subscriber` directly.\n * If you have a specific use case, please file an issue.\n */\n static create(next?: (x?: T) => void, error?: (e?: any) => void, complete?: () => void): Subscriber {\n return new SafeSubscriber(next, error, complete);\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected isStopped: boolean = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n protected destination: Subscriber | Observer; // this `any` is the escape hatch to erase extra type param (e.g. R)\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * There is no reason to directly create an instance of Subscriber. 
This type is exported for typings reasons.\n */\n constructor(destination?: Subscriber | Observer) {\n super();\n if (destination) {\n this.destination = destination;\n // Automatically chain subscriptions together here.\n // if destination is a Subscription, then it is a Subscriber.\n if (isSubscription(destination)) {\n destination.add(this);\n }\n } else {\n this.destination = EMPTY_OBSERVER;\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `next` from\n * the Observable, with a value. The Observable may call this method 0 or more\n * times.\n * @param {T} [value] The `next` value.\n * @return {void}\n */\n next(value?: T): void {\n if (this.isStopped) {\n handleStoppedNotification(nextNotification(value), this);\n } else {\n this._next(value!);\n }\n }\n\n /**\n * The {@link Observer} callback to receive notifications of type `error` from\n * the Observable, with an attached `Error`. Notifies the Observer that\n * the Observable has experienced an error condition.\n * @param {any} [err] The `error` exception.\n * @return {void}\n */\n error(err?: any): void {\n if (this.isStopped) {\n handleStoppedNotification(errorNotification(err), this);\n } else {\n this.isStopped = true;\n this._error(err);\n }\n }\n\n /**\n * The {@link Observer} callback to receive a valueless notification of type\n * `complete` from the Observable. Notifies the Observer that the Observable\n * has finished sending push-based notifications.\n * @return {void}\n */\n complete(): void {\n if (this.isStopped) {\n handleStoppedNotification(COMPLETE_NOTIFICATION, this);\n } else {\n this.isStopped = true;\n this._complete();\n }\n }\n\n unsubscribe(): void {\n if (!this.closed) {\n this.isStopped = true;\n super.unsubscribe();\n this.destination = null!;\n }\n }\n\n protected _next(value: T): void {\n this.destination.next(value);\n }\n\n protected _error(err: any): void {\n try {\n this.destination.error(err);\n } finally {\n this.unsubscribe();\n }\n }\n\n protected _complete(): void {\n try {\n this.destination.complete();\n } finally {\n this.unsubscribe();\n }\n }\n}\n\n/**\n * This bind is captured here because we want to be able to have\n * compatibility with monoid libraries that tend to use a method named\n * `bind`. 
In particular, a library called Monio requires this.\n */\nconst _bind = Function.prototype.bind;\n\nfunction bind any>(fn: Fn, thisArg: any): Fn {\n return _bind.call(fn, thisArg);\n}\n\n/**\n * Internal optimization only, DO NOT EXPOSE.\n * @internal\n */\nclass ConsumerObserver implements Observer {\n constructor(private partialObserver: Partial>) {}\n\n next(value: T): void {\n const { partialObserver } = this;\n if (partialObserver.next) {\n try {\n partialObserver.next(value);\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n\n error(err: any): void {\n const { partialObserver } = this;\n if (partialObserver.error) {\n try {\n partialObserver.error(err);\n } catch (error) {\n handleUnhandledError(error);\n }\n } else {\n handleUnhandledError(err);\n }\n }\n\n complete(): void {\n const { partialObserver } = this;\n if (partialObserver.complete) {\n try {\n partialObserver.complete();\n } catch (error) {\n handleUnhandledError(error);\n }\n }\n }\n}\n\nexport class SafeSubscriber extends Subscriber {\n constructor(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((e?: any) => void) | null,\n complete?: (() => void) | null\n ) {\n super();\n\n let partialObserver: Partial>;\n if (isFunction(observerOrNext) || !observerOrNext) {\n // The first argument is a function, not an observer. The next\n // two arguments *could* be observers, or they could be empty.\n partialObserver = {\n next: (observerOrNext ?? undefined) as (((value: T) => void) | undefined),\n error: error ?? undefined,\n complete: complete ?? undefined,\n };\n } else {\n // The first argument is a partial observer.\n let context: any;\n if (this && config.useDeprecatedNextContext) {\n // This is a deprecated path that made `this.unsubscribe()` available in\n // next handler functions passed to subscribe. This only exists behind a flag\n // now, as it is *very* slow.\n context = Object.create(observerOrNext);\n context.unsubscribe = () => this.unsubscribe();\n partialObserver = {\n next: observerOrNext.next && bind(observerOrNext.next, context),\n error: observerOrNext.error && bind(observerOrNext.error, context),\n complete: observerOrNext.complete && bind(observerOrNext.complete, context),\n };\n } else {\n // The \"normal\" path. 
Just use the partial observer directly.\n partialObserver = observerOrNext;\n }\n }\n\n // Wrap the partial observer to ensure it's a full observer, and\n // make sure proper error handling is accounted for.\n this.destination = new ConsumerObserver(partialObserver);\n }\n}\n\nfunction handleUnhandledError(error: any) {\n if (config.useDeprecatedSynchronousErrorHandling) {\n captureError(error);\n } else {\n // Ideal path, we report this as an unhandled error,\n // which is thrown on a new call stack.\n reportUnhandledError(error);\n }\n}\n\n/**\n * An error handler used when no error handler was supplied\n * to the SafeSubscriber -- meaning no error handler was supplied\n * to the `subscribe` call on our observable.\n * @param err The error to handle\n */\nfunction defaultErrorHandler(err: any) {\n throw err;\n}\n\n/**\n * A handler for notifications that cannot be sent to a stopped subscriber.\n * @param notification The notification being sent\n * @param subscriber The stopped subscriber\n */\nfunction handleStoppedNotification(notification: ObservableNotification<any>, subscriber: Subscriber<any>) {\n const { onStoppedNotification } = config;\n onStoppedNotification && timeoutProvider.setTimeout(() => onStoppedNotification(notification, subscriber));\n}\n\n/**\n * The observer used as a stub for subscriptions where the user did not\n * pass any arguments to `subscribe`. Comes with the default error handling\n * behavior.\n */\nexport const EMPTY_OBSERVER: Readonly<Observer<any>> & { closed: true } = {\n closed: true,\n next: noop,\n error: defaultErrorHandler,\n complete: noop,\n};\n", "/**\n * Symbol.observable or a string \"@@observable\". Used for interop\n *\n * @deprecated We will no longer be exporting this symbol in upcoming versions of RxJS.\n * Instead polyfill and use Symbol.observable directly *or* use https://www.npmjs.com/package/symbol-observable\n */\nexport const observable: string | symbol = (() => (typeof Symbol === 'function' && Symbol.observable) || '@@observable')();\n", "/**\n * This function takes one parameter and just returns it. Simply put,\n * this is like `(x: T): T => x`.\n *\n * ## Examples\n *\n * This is useful in some cases when using things like `mergeMap`\n *\n * ```ts\n * import { interval, take, map, range, mergeMap, identity } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(5));\n *\n * const result$ = source$.pipe(\n * map(i => range(i)),\n * mergeMap(identity) // same as mergeMap(x => x)\n * );\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * Or when you want to selectively apply an operator\n *\n * ```ts\n * import { interval, take, identity } from 'rxjs';\n *\n * const shouldLimit = () => Math.random() < 0.5;\n *\n * const source$ = interval(1000);\n *\n * const result$ = source$.pipe(shouldLimit() ? 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements the {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal, etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note, however, that if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error')`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on the Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is the equivalent\n * of a `next` method, the second of an `error` method, and the third of a `complete` method. Just as in the case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in the function call. When it comes\n * to the `error` function, as with an Observer, if it is not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case when you're not interested in terminal events\n * and have handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that the Observable used. Note that cancelling a subscription will not call the `complete` callback\n * provided to the `subscribe` function, which is reserved for a regular completion signal that comes from the Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is the Observable itself that decides when these functions will be called. For example, {@link of}\n * by default emits all its values synchronously. 
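To make the synchronous delivery concrete, here is a minimal sketch (an illustrative addition, not part of the upstream RxJS docs; it assumes only `of` and `subscribe` as described above):\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * console.log('before subscribe');\n * of(1, 2, 3).subscribe(value => console.log(value));\n * console.log('after subscribe');\n *\n * // Logs:\n * // 'before subscribe'\n * // 1\n * // 2\n * // 3\n * // 'after subscribe' (all values were delivered before `subscribe` returned)\n * ```\n * 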
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
Used cores

Click on the figures below to get other time periods for each system, respectively.
[Figures: used cores over time for Rackham, Bianca and Snowy]
+ + diff --git a/cluster_guides/transfer_bianca/index.html b/cluster_guides/transfer_bianca/index.html new file mode 100644 index 000000000..4dbc7ea94 --- /dev/null +++ b/cluster_guides/transfer_bianca/index.html @@ -0,0 +1,3458 @@ + + + + + + + + + + + + + + + + + + + + + + + Transfer to/from Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Bianca

+
flowchart LR
+  subgraph sunet[SUNET]
+    subgraph bianca[Bianca]
+      wharf
+    end
+    transit[transit server]
+    sftp_server[SFTP server]
+    user[User in SUNET or user on Rackham or user on other NAISS clusters]
+    wharf <--> transit
+    wharf <--> sftp_server
+    transit <--> user
+    sftp_server <--> user
+  end
+

File transfer is the process of getting files +from one place to another. This page shows how to do file transfer to/from +the Bianca UPPMAX cluster.

+

For all file transfer on Bianca:

+ +

File transfer methods

+

There are multiple ways to transfer files to/from Bianca:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Method | Features
Using a graphical program | Graphical interface, intuitive, for small amounts of data only
Using rsync | Terminal, recommended
Using sftp | Terminal, easy to learn, can use terminal commands to select files
Using lftp | Terminal
Transit server from/to Rackham, see below | Terminal, can be used to transfer data between clusters in general
Mounting wharf on your local computer | Both graphical and terminal, needs a computer with sshfs installed
+

Using a graphical program

+

FileZilla connected to Bianca

+
+

FileZilla connected to Bianca

+
+

To transfer files to/from Bianca +one can use a graphical tool, such as FileZilla or WinSCP. +See Bianca file transfer using a graphical program +for details.

+

Using sftp

+

sftp is a terminal SFTP client to transfer files to/from Bianca. +See Bianca file transfer using sftp.

+
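A minimal example session, where sven and sens2023531 are placeholders for your own username and project (on Bianca, the login name is the two joined by a dash):
# connect to the Bianca SFTP server
+sftp sven-sens2023531@bianca-sftp.uppmax.uu.se
+# in the session, 'put my_file.txt' uploads and 'get my_file.txt' downloads
+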

Using lftp

+

lftp is a terminal program to transfer files to/from Bianca. +See Bianca file transfer using lftp.

+
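A minimal sketch with the same placeholder user and project; lftp speaks the SFTP protocol via an sftp:// URL:
# open an SFTP connection with lftp
+lftp sftp://sven-sens2023531@bianca-sftp.uppmax.uu.se
+# 'mirror -R my_dir' uploads a directory, 'mirror my_dir' downloads one
+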

Using rsync

+

rsync is a terminal program to transfer files to/from Bianca. +See Bianca file transfer using rsync.

+

Transit server

+

To facilitate secure data transfers to, from, +and within the system for computing on sensitive data, a special service is available +via SSH at transit.uppmax.uu.se.

+

A user that is logged in to Transit

+

See the UPPMAX documentation on the Transit server.

+
    +
  • +

Note that your home directory is mounted read-only; any changes you make to your "local" home directory (on transit) will be lost upon logging out.

    +
  • +
  • +

You can use commands like rsync and scp to fetch data and transfer it to your Bianca wharf.

    +
      +
    • You can use cp to copy from Rackham to the wharf
    • +
    +
  • +
  • Remember that you cannot make lasting changes to anything except the mounted wharf directories. Therefore, use rsync or scp to transfer files from the wharf to Rackham.
  • +
  • The mounted directory will be kept for later sessions.
  • +
+

Moving data from transit to Rackham

+
    +
  • On Rackham (or other computer): copy files to Bianca via transit:
  • +
+
# scp
+scp path/my_files my_user@transit.uppmax.uu.se:sens2023531/
+
+# rsync
+rsync -avh path/my_files my_user@transit.uppmax.uu.se:sens2023531/
+
+
    +
  • On transit: copy files to Bianca from Rackham (or other computer)
  • +
+
# scp
+scp my_user@rackham.uppmax.uu.se:path/my_files ~/sens2023531/
+
+# rsync
+rsync -avh my_user@rackham.uppmax.uu.se:path/my_files ~/sens2023531/
+
+
📖 See this rsync tutorial for beginners: https://www.digitalocean.com/community/tutorials/how-to-use-rsync-to-sync-local-and-remote-directories
+
+

⚠ Keep in mind that project folders on Rackham are not available on transit.

+

Moving data between projects

+
    +
  • You can use transit to transfer data between projects + by mounting the wharfs for the different projects + and transferring data with rsync, as sketched below.
  • +
  • Note that you may only do this if this is allowed + (agreements, permissions, etc.)
  • +
+
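A minimal sketch of such a between-project transfer; sens2023531 and sens2023532 are hypothetical project names, and the mount_wharf helper is an assumption, so check the Transit documentation for the exact commands:
# on transit: mount the wharf of both projects (mount_wharf is an assumed helper)
+mount_wharf sens2023531
+mount_wharf sens2023532
+# copy the data from one wharf to the other
+rsync -avh ~/sens2023531/my_files ~/sens2023532/
+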

Mounting wharf on your local computer

+

Mounting wharf means that a wharf folder is added to the +filesystem of your local computer, after which you can use +it like any other folder.

+

See the UPPMAX documentation on wharf for how to do so.

+
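For illustration only, a wharf mount via sshfs might look as follows; the remote path format is an assumption, so follow the wharf documentation linked above:
# mount the wharf into a local folder (remote path format is an assumption)
+mkdir -p ~/wharf_mount
+sshfs sven-sens2023531@bianca-sftp.uppmax.uu.se:sven-sens2023531 ~/wharf_mount
+# work with the files like any local folder, then unmount:
+fusermount -u ~/wharf_mount
+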
+
+

Summary

+
    +
  • For simple transfers use SFTP to connect to bianca-sftp.uppmax.uu.se - use the command-line sftp client or tools that support the SFTP protocol.
  • +
  • For rsync - sync files to a pre-mounted wharf folder from Rackham or a secure local computer.
  • +
  • Keep in mind that project folders on Rackham are not available on transit.
  • +
+
+

Bianca file transfer as image

+

Bianca

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/transfer_dardel/index.html b/cluster_guides/transfer_dardel/index.html new file mode 100644 index 000000000..89897bf97 --- /dev/null +++ b/cluster_guides/transfer_dardel/index.html @@ -0,0 +1,3281 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Dardel - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Dardel

+

Dardel server racks

+

This page describes how to transfer files to Dardel, +the HPC cluster at PDC in Stockholm.

+

Why do I need this?

+

The Rackham cluster will be decommissioned at the end of 2024, +so all projects have to migrate their data and calculations to other resources. +The plan from NAISS is that all Rackham users will move to +the Dardel cluster at PDC.

+

How do I do this?

+

First, we are here to help. +Please contact support if you run into problems +when trying the guide below.

+

To transfer your files to Dardel, follow the steps below.

+
flowchart TD
+  get_supr_project[1 Access to a SUPR project with Dardel]
+  get_pdc_account[2 Access to a PDC account]
+  create_ssh_key[3 Create SSH key pair on Rackham]
+  add_ssh_key[4 Add public SSH key to PDC Login Portal]
+  transfer_files[5 Transfer files to Dardel]
+
+  get_supr_project --> |requires| get_pdc_account
+  create_ssh_key --> |requires| add_ssh_key
+  get_pdc_account --> |requires| add_ssh_key
+  add_ssh_key --> |requires| transfer_files
+

3. Create an SSH key pair

+

How to create an SSH key pair is described in detail at the PDC page on how to create an SSH key pair.

+

On Rackham, do:

+
# generate the key
+ssh-keygen -t ed25519 -N "" -f ~/.ssh/id_ed25519_pdc
+
+

and you have created an SSH key pair.

+
+How do I know this worked? +

On Rackham, in a terminal, type:

+
$ cat ~/.ssh/id_ed25519_pdc.pub
+
+

This will show a text similar to:

+
ssh-ed25519 AAAA63Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se
+
+
+

4. Add the public SSH key to PDC's Login Portal

+

How to add the SSH public key is described +in detail in the PDC documentation on how to add SSH keys.

+

You will need to get the public part of the key in order to complete this step. On Rackham, in a terminal, type:

+
cat ~/.ssh/id_ed25519_pdc.pub
+
+

This will show a text similar to:

+
ssh-ed25519 AAAA63Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se
+
+

Select and copy that text; it is the public key you will add.

+

In short,

+
    +
  1. Open the PDC Login Portal
  2. Follow the instructions there to log in.
  3. Click on the Add new key link.
  4. Paste the public key you copied after running the cat command above.
  5. Make up a name for the key so you know which computer it resides on, e.g. rackham-darsync.
  6. Press the Save button.
+
+What does adding the key look like? +

Click on 'Prove Identity'

+
+

Click on 'Prove Identity'

+
+

PDC key management before any keys are added

+
+

PDC key management before any keys are added.

+
+

How it looks when adding a new key

+
+

How it looks when adding a new key.

+
+
+

After having added your public SSH key, you will be able to see your registered keys.

+
+What does that look like? +

Here we see that there is an SSH key added

+
+

Here we see that there is an SSH key added.

+
+
+

The next thing you have to do is to add UPPMAX as a place permitted to use your newly added key. Do that by pressing the Add address link for the key you just added. At the bottom of the form there is a section called Custom domain. Add *.uppmax.uu.se in that field and press Save.

+
+What does that look like? +

This is where you enter that UPPMAX is allowed to use this key.

+
+

This is where you enter that UPPMAX is allowed to use this key.

+
+
+
+For staff only +

@Richel, need a screenshot of adding custom domain.

+
+

To validate that it works, you can connect to Dardel via SSH:

+
# replace your_dardel_username with your actual Dardel username
+ssh -i ~/.ssh/id_ed25519_pdc your_dardel_username@dardel.pdc.kth.se
+
+
+For staff only +

@Richel, need a screenshot of ssh working

+
+

5. Transfer files

+

To facilitate this move we have created Darsync, +a tool that can inspect your files and make suggestions +to make the transfer easier, +as well as generate a script file you can submit to Slurm +to perform the actual file transfer. +Read more about how to use Darsync here.

+

Here is a summary of how to run it, using /path/to/dir as a placeholder for the actual path to the directory you want to copy to Dardel:

+
module load darsync
+
+darsync check --local-dir /path/to/dir
+# fix any errors the check step found
+darsync gen --local-dir /path/to/dir --outfile ~/dardel_transfer_script.sh
+
+

6. Submit the script created by Darsync

+

Submit the transfer script created by Darsync to Slurm:

+
sbatch --output="$HOME/dardel_transfer.out" --error="$HOME/dardel_transfer.err" ~/dardel_transfer_script.sh
+
+
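While the job is queued or running, you can monitor it with a standard Slurm command (not specific to Darsync):
# list your own queued and running jobs
+squeue -u $USER
+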

7. Check logs

+

Once the submitted job has finished, have a look at the log files produced by the job and make sure it did not end in an error message.

+
tail ~/dardel_transfer.out
+tail ~/dardel_transfer.err
+
+
+For staff only +

@Richel, need a screenshot of successful rsync command, as well as a failed one?

+
+

If there are any errors, you can run darsync gen again, correct any mistakes you made, and submit the new script file.

+

If you have updated your files at UPPMAX and want to sync over the changes, just submit the same script file again and it will only transfer over the modified files.

+

If your data transfer took too long and got killed by Slurm, or if it crashed for some other reason, just submit the same script again and it will pick up from where it left off.

+

8. Delete the SSH key pair

+

When you are done transferring files, you should delete the SSH keys you created in the previous steps of this guide. The keys were created without a passphrase to protect them (required to run Darsync as an unattended job), so it is best to delete them.

+
rm ~/.ssh/id_ed25519_pdc*
+
+

Create new ones if you still need to connect to Dardel from UPPMAX. To create new keys protected by a passphrase, simply run:

+
ssh-keygen -t ed25519
+
+

and add the new public key (~/.ssh/id_ed25519.pub) to the PDC Login Portal following the same steps as above.

+

Once you are sure your data has been transferred, we recommend that you switch over to only work on Dardel. If you keep working on both clusters you will easily forget which cluster has the most up-to-date version of the files.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/transfer_rackham/index.html b/cluster_guides/transfer_rackham/index.html new file mode 100644 index 000000000..f6954408e --- /dev/null +++ b/cluster_guides/transfer_rackham/index.html @@ -0,0 +1,3360 @@ + + + + + + + + + + + + + + + + + + + + + + + Transfer to/from Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Rackham

+

There are multiple ways to transfer files to/from Rackham:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Method | Features
Using a graphical program | Graphical interface, intuitive, for small amounts of data only
Using SCP | Terminal, easy to learn, can be used in scripts
Using SFTP | Terminal, easy to learn, secure
Using transit | Terminal, easy to learn, secure, can transfer between HPC clusters
+

Each of these methods is discussed below.

+

Using a graphical program

+

One can transfer files to/from Rackham using a graphical program. +A graphical interface is intuitive to most users. +However, it can be used for small amounts of data only +and whatever you do cannot be automated.

+

See Rackham file transfer using a graphical program +for a step-by-step guide how to transfer files using +a graphical tool.

+

Using SCP

+

One can transfer files to/from Rackham +using SCP in a terminal. +This works similarly to a regular copy of files, +except that a remote address needs to be specified. +The advantage of SCP is that it can be used in scripts.

+

See Rackham file transfer using SCP +for a step-by-step guide how to transfer files using SCP.

+
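A minimal example, where sven and the paths are placeholders:
# upload a file to your Rackham home folder
+scp my_file.txt sven@rackham.uppmax.uu.se:/home/sven/
+# download it back to the current local folder
+scp sven@rackham.uppmax.uu.se:/home/sven/my_file.txt .
+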

Using SFTP

+

One can transfer files to/from Rackham using SFTP in a terminal. +One connects a local and a remote folder, +after which one can upload and download files. +SFTP is considered a secure file transfer protocol.

+

See Rackham file transfer using SFTP +for a step-by-step guide how to transfer files using SFTP.

+
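A minimal example session, again with the placeholder user sven:
# start an interactive SFTP session
+sftp sven@rackham.uppmax.uu.se
+# in the session, 'put my_file.txt' uploads and 'get my_file.txt' downloads
+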

Using transit

+

One can transfer files to/from Rackham using the UPPMAX transit server. +One connects a local folder and the transit server, +after which one can upload and download files.

+

See Rackham file transfer using transit +for a step-by-step guide how to transfer files using the transit UPPMAX server.

+

Overview

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fcf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+    classDef transit_node fill:#fff,color:#000,stroke:#fff
+
+    subgraph sub_inside[SUNET]
+      direction LR
+      user(User)
+      user_local_files(Local user files):::file_node
+
+      subgraph sub_transit_env[Transit]
+        transit_login(Transit login):::calculation_node
+        files_on_transit(Files posted to Transit):::transit_node
+      end
+      subgraph sub_rackham_shared_env[Rackham]
+          rackham_login(Rackham login node):::calculation_node
+          files_in_rackham_home(Files in Rackham home folder):::file_node
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#ccc,color:#000,stroke:#000
+    style sub_transit_env fill:#cfc,color:#000,stroke:#000
+    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000
+
+    user --> |has|user_local_files
+    user --> |logs in |transit_login
+    user --> |logs in |rackham_login
+
+    user_local_files <--> |graphical tool|files_in_rackham_home
+    user_local_files <--> |SCP|files_in_rackham_home
+    user_local_files <--> |SFTP|files_in_rackham_home
+    user_local_files <--> |graphical tool|files_on_transit
+    user_local_files <--> |SFTP|files_on_transit
+
+    rackham_login --> |can use|files_in_rackham_home
+
+    transit_login --> |can use|files_on_transit
+    files_on_transit <--> |transfer|files_in_rackham_home
+
+    files_in_rackham_home ~~~ transit_login
+
+

Overview of file transfer on Rackham +The purple nodes are about file transfer, +the blue nodes are about 'doing other things'. +The user can be either inside or outside SUNET.

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/transfer_transit/index.html b/cluster_guides/transfer_transit/index.html new file mode 100644 index 000000000..abc7453bf --- /dev/null +++ b/cluster_guides/transfer_transit/index.html @@ -0,0 +1,3258 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Transit - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Transit

+

There are multiple ways to transfer files to/from Transit:

+
+What is Transit? +

Transit is an UPPMAX service to send files around. +It is not a file server.

+

See the page about Transit for more detailed information.

+
+ + + + + + + + + + + + + + + + + + + + + + + + + +
Method | Features
Using a graphical program | Graphical interface, intuitive, for small amounts of data only
Using rsync | Terminal, easy to learn, secure
Using SFTP | Terminal, easy to learn, secure
Using SCP | ⛔ only download, terminal, easy to learn, can be used in scripts
+

Each of these methods is discussed below.

+

Using a graphical program

+

One can transfer files to/from Transit using a graphical program. +A graphical interface is intuitive to most users. +However, it can be used for small amounts of data only +and whatever you do cannot be automated.

+

See Transit file transfer using a graphical program +for a step-by-step guide how to transfer files using +a graphical tool.

+

Using rsync

+

Transit is used as a stepping-stone to +transfer files to Bianca using rsync.

+
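For example, from Rackham or a local computer one can push files towards the Bianca wharf via transit (my_user and the project sens2023531 are placeholders, matching the example on the Bianca transfer page):
# copy files to the Bianca wharf via transit
+rsync -avh path/my_files my_user@transit.uppmax.uu.se:sens2023531/
+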

Using SCP

+

One cannot upload files to Transit using SCP in a terminal: +Transit only allows for sending files from A to B, not for storing them.

+

One can download the files on Transit. +However, Transit is not a file server. +Instead, the files that appear to be on Transit +are the files in your Rackham home folder. +Due to this, it makes more sense to use SCP to transfer files to/from Rackham.

+

For completeness' sake, see Transit file transfer using SCP +for a step-by-step guide how to transfer files using SCP. +It shows one cannot upload files to Transit.

+

Using SFTP

+

One can transfer files to/from Transit using SFTP in a terminal. +One connects a local and a remote folder, +after which one can upload and download files. +SFTP is considered a secure file transfer protocol.

+

See Transit file transfer using SFTP +for a step-by-step guide how to transfer files using SFTP.

+

Overview

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fff,color:#000,stroke:#000
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+    classDef transit_node fill:#fff,color:#000,stroke:#fff
+
+    subgraph sub_inside[SUNET]
+      user_local_files(Local user files):::file_node
+
+      subgraph sub_transit_env[Transit]
+        files_on_transit(Files posted to Transit):::transit_node
+      end
+      subgraph sub_rackham_shared_env[Rackham]
+        files_in_rackham_home(Files in Rackham home folder):::file_node
+      end
+      subgraph sub_bianca_private_env[Bianca]
+        files_in_bianca_project(Files in Bianca project folder):::file_node
+      end
+      subgraph sub_other_clusters[Other clusters]
+        files_on_other_clusters(Files on other clusters):::file_node
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#ccc,color:#000,stroke:#000
+    style sub_transit_env fill:#cfc,color:#000,stroke:#000
+    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000
+    style sub_bianca_private_env fill:#ccf,color:#000,stroke:#000
+    style sub_other_clusters fill:#ffc,color:#000,stroke:#000
+
+    user_local_files <--> |graphical tool|files_on_transit
+    user_local_files <--> |SFTP|files_on_transit
+
+    files_on_transit <--> |SCP|files_in_rackham_home
+    files_on_transit <--> |SFTP|files_in_rackham_home
+
+    files_on_transit <--> |SCP|files_in_bianca_project
+    files_on_transit <--> |SFTP|files_in_bianca_project
+
+    files_on_transit <--> |transfer|files_on_other_clusters
+
+
+

Overview of file transfer on Transit

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/transit/index.html b/cluster_guides/transit/index.html new file mode 100644 index 000000000..a22988cbb --- /dev/null +++ b/cluster_guides/transit/index.html @@ -0,0 +1,3138 @@ + + + + + + + + + + + + + + + + + + + Transit - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Transit

+

Transit +is an UPPMAX service that can be used to securely transfer files +between online locations, such as your local computer, Bianca, +Rackham and other sensitive data clusters.

+
+Is Transit a file server? +

Transit is a service, not a file server: it does not store files.

+

This can be observed by uploading files to Transit +and then closing this connection +before sending the files to a permanent location: +the Transit-only files will disappear.

+
+
+What is Transit? +

From https://sv.wikipedia.org/wiki/Brevl%C3%A5da#/media/Fil:Brevl%C3%A5dor.jpg

+
+

A Swedish post box. The yellow post box is for non-regional mail, +the blue for regional mail.

+
+

Transit can be viewed as a post box, +where the file you upload is a letter.

+

If you put a letter without an address in a post box, +it will be thrown away.

+

If you put an address on the letter, +the letter will be delivered. +Here, 'putting an address on the letter' +is to copy the file to the desired location.

+
+ + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/transit_file_transfer_using_gui/index.html b/cluster_guides/transit_file_transfer_using_gui/index.html new file mode 100644 index 000000000..d1677bc4d --- /dev/null +++ b/cluster_guides/transit_file_transfer_using_gui/index.html @@ -0,0 +1,3125 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Transit using a graphical tool - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Transit using a graphical tool

+

There are multiple ways to transfer files to/from Transit. +Here we describe how to do so using a graphical tool.

+

There are multiple graphical tools to do so:

+ + + + + + + + + + + + + + + + + + + + +
Link to procedure | Tool | Description
here | FileZilla | Free, open source, works on all platforms (recommended)
here | WinSCP | Only works under Windows
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax/index.html b/cluster_guides/uppmax/index.html new file mode 100644 index 000000000..2aa23271b --- /dev/null +++ b/cluster_guides/uppmax/index.html @@ -0,0 +1,3141 @@ + + + + + + + + + + + + + + + + + + + UPPMAX - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX

+

UPPMAX is an organization +that provides HPC infrastructure that is physically located in Uppsala. +To do so, it provides the UPPMAX systems below.

+

UPPMAX systems

+

Here we place Bianca among the other UPPMAX systems.

+

There are three types of UPPMAX systems:

+
    +
  • Computing systems
  • +
  • Storage systems
  • +
  • Cloud services
  • +
+

One can apply for these resources, +as is described here.

+

UPPMAX computing systems

+

Computing systems allow a user to do heavier computational calculations. +At UPPMAX, we use multiple HPC clusters, +that are discussed here

+

UPPMAX storage systems

+

See UPPMAX systems.

+

UPPMAX Cloud services

+

See UPPMAX systems.

+

Cloud services allow a user to have something active (typically a website) +that can be accessed from the internet.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_as_an_organization/index.html b/cluster_guides/uppmax_as_an_organization/index.html new file mode 100644 index 000000000..973ebd797 --- /dev/null +++ b/cluster_guides/uppmax_as_an_organization/index.html @@ -0,0 +1,3137 @@ + + + + + + + + + + + + + + + + + + + UPPMAX as an organization - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX as an organization

+

UPPMAX is a provider of HPC infrastructure +that is physically located in Uppsala.

+
+Where can I find an overview of UPPMAX? +

One can find an overview of UPPMAX here

+
+

Here we place UPPMAX within the bigger, national, picture, +starting from the biggest source of money for research in Sweden.

+

Vetenskapsrådet logo

+

Vetenskapsrådet (the Swedish Research Council, VR) is the biggest funder +of research in Sweden and funds the national HPC infrastructure.

+

NAISS logo

+

The National Academic Infrastructure for Supercomputing in Sweden (NAISS) provides such HPC infrastructure: computing power, storage and data services. Applications for these resources start at +this NAISS page. These resources are physically located in multiple places in Sweden, +among them Uppsala.

+

UPPMAX logo

+

Uppsala Multidisciplinary Center for Advanced Computational Science (UPPMAX = UppMACS) +provides the HPC infrastructure that is physically located in Uppsala. +Part of this is to provide training and support.

+
flowchart TD
+    HPC_Sweden(HPC in Sweden)
+    HPC_others(HPC in other cities)
+    HPC_Uppsala(HPC in Uppsala)
+    NAISS(NAISS)
+    UPPMAX(UPPMAX)
+    UU(Uppsala University)
+    Users(Users)
+    VR(Vetenskapsrådet)
+
+    VR --> |money| HPC_Sweden
+    HPC_Sweden -->|done by| NAISS
+    NAISS --> |money| HPC_others
+    NAISS --> |money| HPC_Uppsala
+    HPC_Uppsala -->|done by| UPPMAX
+    UU -->|money| HPC_Uppsala
+    Users -->|apply for HPC|NAISS
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_cloud/index.html b/cluster_guides/uppmax_cloud/index.html new file mode 100644 index 000000000..3e7a3f078 --- /dev/null +++ b/cluster_guides/uppmax_cloud/index.html @@ -0,0 +1,3161 @@ + + + + + + + + + + + + + + + + + + + UPPMAX cloud - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX cloud

+

Cloud services allow a user to have something active (typically a website) +that can be accessed from the internet.

+

The NAISS 'Swedish Science Cloud' (SSC) +consists of multiple regions. +The eastern region (called EAST-1) of SSC is named +'Dis' (the Swedish word for 'haze') +and is hosted by Uppsala University +(the service is called 'UPPMAX cloud') +and Umeå University (north, HPC2N).

+

History of Dis

+

The UPPMAX cloud 'Dis' (the Swedish word for 'haze'), +the successor of 'Smog' (Swedish for 'smog'), +was introduced in October 2017 and upgraded during 2020.

+

Apply for an SCC project

+

See the UPPMAX pages on 'Apply for an SCC project'

+

Technical specifications

+
    +
  • 40 compute nodes, 24 dedicated to NAISS and 16 to local projects. + Each compute node is equipped with 128-256 GB memory + and dual E5-2660 CPUs at 2.2 GHz, for a total of 16 cores per compute node
  • +
  • VM flavors for small (2 vCPUs) up to large (16 vCPUs) compute allocations
  • +
  • 250 TB of total volume storage.
  • +
  • Interconnect is 10GbE.
  • +
+

Object storage is planned for 2021 but currently unavailable.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_cluster/index.html b/cluster_guides/uppmax_cluster/index.html new file mode 100644 index 000000000..30ae53fb2 --- /dev/null +++ b/cluster_guides/uppmax_cluster/index.html @@ -0,0 +1,3615 @@ + + + + + + + + + + + + + + + + + + + + + + + The UPPMAX clusters - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

The UPPMAX clusters

+

UPPMAX is an organization that provides HPC clusters.

+
+Where can I find an overview of UPPMAX? +

One can find an overview of UPPMAX here

+
+
+Where can I find an overview of UPPMAX's systems? +

One can find an overview of UPPMAX's systems here

+
+

After an overview of the different UPPMAX clusters, +we discuss what a computer cluster is, +how it differs from a supercomputer, +which restrictions apply to a computer cluster, +and which additional restrictions apply to a sensitive data computer cluster.

+

This is followed by a detailed technical summary of the clusters +and a detailed overview of the clusters.

+

Overview of UPPMAX clusters

+

UPPMAX clusters are computing systems, +i.e. they allow a user to do heavy computational calculations.

+

All UPPMAX clusters are named after Tintin characters. +UPPMAX has, among others, the following clusters:

+
    +
  • Bianca: for sensitive data, general use. + In the near future, will be replaced by Maja
  • +
  • Rackham: regular data, general purpose. + Will be for UU staff only from 2025-01-01. + In the near future, will be replaced by Pelle
  • +
  • Snowy: regular data, long runs and GPU:s
  • +
+

Another cluster UPPMAX is involved in:

+
    +
  • Dardel: a general purpose HPC cluster in Stockholm. + Consider moving your files to it already
  • +
+
flowchart TD
+    UPPMAX(Which UPPMAX cluster?)
+    Bianca
+    Dardel
+    Maja
+    Pelle
+    Rackham
+    Snowy
+    is_sensitive[Do you use sensitive data?]
+    is_long[Do you use long runs and/or GPUs?]
+
+    UPPMAX --> is_sensitive
+    is_sensitive --> |yes|Bianca
+    is_sensitive --> |no|is_long
+    is_long --> |no|Rackham
+    is_long --> |yes|Snowy
+    Bianca --> |near future| Maja
+
+    Rackham --> |not UU, before 2025-01-01| Dardel
+    Rackham --> |UU, near future| Pelle
+

All UPPMAX clusters share the same file system layout, +with special folders. See the UPPMAX page on its file +systems here.

+

What is a computer cluster technically?

+

A computer cluster is a machine that consists of many computers. +These computers work together.

+

Each computer of a cluster is called a node.

+

There are three types of nodes:

+
    +
  • login nodes: nodes where a user enters and interacts with the system
  • +
+
+Logging in +

Logging in is described separately per cluster:

+ +
+
    +
  • calculation nodes: nodes that do the calculations
  • +
+
+Requesting a calculation to run +

Requesting a calculation to run is described here. +This is done by using the Slurm scheduler.

+
+
    +
  • interactive nodes: a type of calculation node, where a user can do calculations directly
  • +
+
+Requesting an interactive node +

Requesting an interactive node is described per cluster:

+ +

This is done by requesting an interactive node +from the Slurm scheduler.

+
+

Each node contains several CPU/GPU cores, RAM and local storage space.

+

A user logs in to a login node via the Internet.

+
flowchart TD
+
+  login_node(User on login node)
+  interactive_node(User on interactive node)
+  computation_node(Computation node)
+
+  login_node --> |move user, interactive|interactive_node
+  login_node --> |submit jobs, sbatch|computation_node
+  computation_node -.-> |can become| interactive_node
+
+

The different types of nodes an UPPMAX cluster has.

+
+

Difference between a supercomputer and a (high-performing) computer cluster

+

A supercomputer, from https://en.wikipedia.org/wiki/File:IBM_Blue_Gene_P_supercomputer.jpg

+

A supercomputer is a machine that is optimized for doing calculations +quickly. For example, to predict the weather for tomorrow, the calculation +may not take a week. The image above is a supercomputer.

+

A computer cluster using some Raspberry Pi's

+

A computer cluster is a set of computers that work together so that they can be viewed as a single system. +The image above shows a home-made computer cluster. +This home-made computer cluster may not be suitable for high-performance computing.

+

The Rackham computer cluster

+

The image above shows Rackham, another UPPMAX +computer cluster, suitable for high-performance computing. +This makes Rackham a high-performance computing (HPC) cluster. +Bianca and Rackham are both HPC clusters.

+

When using this definition:

+
+

a supercomputer is one big computer, +while high-performance computing is many computers working toward the same goal

+

Frank Downs

+
+

one could conclude that the UPPMAX HPC cluster can be used as a supercomputer +when a user runs a calculation on all nodes.

+

Restrictions on a computer cluster

+

A computer cluster is a group of computers that can run +many calculations, as requested by multiple people, at the same time.

+

To ensure fair use of this shared resource, regular users +are restricted in some ways:

+
    +
  • Users cannot run calculations directly. + Instead, users need to request either (1) a calculation to be run, + or (2) an interactive node
  • +
+
+Requesting a calculation to run +

Requesting a calculation to run is described here. +This is done by using the Slurm scheduler.

+
+
+Requesting an interactive node +

Requesting an interactive node is described per cluster:

+ +

This is done by requesting an interactive node +from the Slurm scheduler.

+
+
    +
  • Users cannot install software directly. + Instead, users need to use pre-installed software or learn + techniques to run custom software anyway
  • +
+
+Using pre-installed software +

Using pre-installed software is described here. +This is done by using the module system.

+
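For example (the tool module and version are illustrations only; use module avail to see what is actually installed):
# list available modules
+module avail
+# load the bioinformatics umbrella module, then a tool with an explicit version
+module load bioinfo-tools
+module load samtools/1.17
+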
+
+How to run custom software +

Using a Singularity container +allows you to run most custom software on any HPC cluster

+
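A minimal sketch, using an arbitrary public image:
# pull a container image from Docker Hub and run a command inside it
+singularity pull docker://ubuntu:22.04
+singularity exec ubuntu_22.04.sif cat /etc/os-release
+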
+

These restrictions apply to most general-purpose clusters +and all UPPMAX clusters.

+

Restrictions on a sensitive data computer cluster

+

Next to the general restrictions above, +a sensitive data cluster has additional restrictions.

+

Here is an overview which clusters are designed for sensitive data:

+ + + + + + + + + + + + + + + + + + + + + +
Cluster name | Sensitive data yes/no?
Bianca | Yes
Rackham | No
Snowy | No
+

On a sensitive data cluster, +(sensitive) data must be protected so that it remains there, +which leads to these additional restrictions for users:

+
    +
  • Users have no direct access to the internet. + Instead, users can upload/download files to/from a special folder.
  • +
+
+File transfer +

Transferring files is described per sensitive data cluster:

+ +
+

The goal is to prevent the accidental upload/download of sensitive data. +As these transfers are monitored, in case of an accident +both the extent of the leak and the person (accidentally) causing it +are known. Identifying a responsible person in case of such an +accident is required by law.

+

UPPMAX clusters technical summary

+

This is a technical summary of the UPPMAX clusters:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
. | Rackham | Snowy | Bianca
Purpose | General-purpose | General-purpose | Sensitive
# Intel CPU Nodes | 486+144 | 228 | 288
# GPU Nodes | - | 50, Nvidia T4 | 10, 2x Nvidia A100 each
Cores per node | 20/16 | 16 | 16/64
Memory per node | 128 GB | 128 GB | 128 GB
Fat nodes | 256 GB & 1 TB | 256, 512 GB & 4 TB | 256 & 512 GB
Local disk (scratch) | 2/3 TB | 4 TB | 4 TB
Login nodes | Yes | No (reached from Rackham) | Yes (2 cores and 15 GB)
"Home" storage | Domus | Domus | Castor/Cygnus
"Project" storage | Crex, Lutra | Crex, Lutra | Castor/Cygnus
+

Detailed overview of the UPPMAX systems

+

+  graph TB
+
+  Node1 -- interactive --> SubGraph2Flow
+  Node1 -- sbatch --> SubGraph2Flow
+  subgraph "Snowy"
+  SubGraph2Flow(calculation nodes)
+        end
+
+        thinlinc -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow
+        terminal -- usr --> Node1
+        terminal -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow
+        Node1 -- usr-sensXXX + 2FA + no VPN ----> SubGraph1Flow
+
+        subgraph "Bianca"
+        SubGraph1Flow(Bianca login) -- usr+passwd --> private(private cluster)
+        private -- interactive --> calcB(calculation nodes)
+        private -- sbatch --> calcB
+        end
+
+        subgraph "Rackham"
+        Node1[Login] -- interactive --> Node2[calculation nodes]
+        Node1 -- sbatch --> Node2
+        end
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_filesystem/index.html b/cluster_guides/uppmax_filesystem/index.html new file mode 100644 index 000000000..43abcc5cb --- /dev/null +++ b/cluster_guides/uppmax_filesystem/index.html @@ -0,0 +1,3166 @@ + + + + + + + + + + + + + + + + + + + UPPMAX filesystem - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX filesystem

+

One can store files on the UPPMAX clusters.

+

Here we show some common directories +and best practices.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Directory name | Description
backup         | A folder that is guaranteed to have a backup for 30 days
Home folder    | Your home folder, /home/[username], e.g. /home/sven
nobackup       | A folder without a backup
Project folder | Your project folder, /proj/[project_name], e.g. /proj/snic2021-22-780
Wharf          | A Bianca-only folder for file transfer
+

Best practices

+
+Are there any horror stories about this? +

Yes, ask the UPPMAX staff :-)

+
+
    +
  1. Keep an inventory of important data and make a plan + for how it should be treated. Inform collaborators of this plan.
  2. +
  3. Make sure you keep a separate copy of the most important data.
  4. +
  5. Put important data in a backed up directory + (and nothing else, so that the backup system does + not get bogged down with junk).
  6. +
  7. Run chmod -R -w . on directories containing critical data that should normally be preserved (see the sketch below).
  8. +
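A minimal sketch of the last practice, assumed to be run inside the directory holding the critical data:

# recursively remove write permission, protecting the data from accidental changes
chmod -R -w .
# later, restore write permission for yourself if the data must change
chmod -R u+w .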
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_history/index.html b/cluster_guides/uppmax_history/index.html new file mode 100644 index 000000000..417f5f4c1 --- /dev/null +++ b/cluster_guides/uppmax_history/index.html @@ -0,0 +1,3519 @@ + + + + + + + + + + + + + + + + + + + UPPMAX history - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX history

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Resource | Out of commission | Size | Price | Start | End | Features
Grendel | 2004-06-30? | 16 nodes | - | ? | ? | Together with NSC
Ngorongoro | 2008-12-31 | 48 CPUs | - | ? | ? | SunFire 15k
Hagrid | 2008-01-31 | 100 nodes | - | 2003-12-01 | 2007-12-31 | SNIC/SweGrid
Ra | 2009-08-03 | 100 nodes | - | 2005-02-01 | 2009-01-31 | SNIC/matvet
Set | 2010-12-31 | 10 nodes | 0.9 | 2006-07-01 | 2010-06-30 | SNIC, power5+IB
Isis | 2010-12-31 | 200 nodes | 4.49 | 2007-02-01 | 2010-01-31 | SNIC/matvet
Os | 2010-12-31 | 10 nodes | 0 | 2007-02-01 | 2011-12-31 | SNIC, IB
Grad | 2013-01-31 | 64 nodes | - | 2008-04-01 | 2012-03-31 | SNIC/SweGrid
Cell | 2012-01-31 | 2 nodes | - | 2008-09-01 | 2009-08-31 | 2 nodes with cell-processors
Kalkyl | 2013-12-31 | 348 nodes | 8.6 | 2009-12-12 | 2013-12-31 | KAW/SNIC
Bubo | 2013-12-09 | 500TB | 5.3 | 2009-09-12 | 2013-12-01 | KAW/SNIC
Hökeborg | . | ca 230 m2, 7 cooling aggregates, 90 kVA UPS, racks A-D | - | 2011-09-01 | - | Computer hall, faculty means
Hökeborg | . | +3 cooling aggregates, racks E-F | - | 2013-06-01 | - | Computer hall, faculty means
Hökeborg | . | +3 cooling aggregates, +30kVA UPS, moved batteries, racks G-H | - | 2015-04-15 | - | Computer hall, faculty means
Lynx | 2015-11-09 | 500 TB | 4.7 | 2011-12-01 | 2015-12-01 | KAW/SNIC
Halvan | 2016-04-06 | 64 core, 2TB | 1.2 | 2011-02-11 | 2016-02-29 | Misc, extended support 1 year
Tintin | . | 164 nodes | 5.3 | 2012-02-01 | 2016-02-01 | SNIC
Kali | . | 1 node, 30TB disk | 0.1 | X | X+1 year | iRODS, KAW?
dCache | . | 600 TB | 0.6 | 2012-11-19 | 2016-11-18 | SNIC
Gulo | . | 1.2 PB | 1.9 | 2012-11-19 | 2016-11-18 | KAW/BILS
Pica | . | 5.5 PB | 10 | 2013-10-01 | 2017-10-01 | KAW
Host | . | 8 nodes | 0.45 | 2013-11-01 | 2017-11-01 | Used Ganeti, UPPMAX
Milou | . | 248 nodes | 9.9 | 2013-11-01 | 2017-11-01 | KAW/BILS
Milou-f2 | . | 1 node, 4 TB | 1 | 2014-02-01 | 2018-02-01 | Login node
Nestor | 2016-05-31 | 48 nodes | - | 2014-04-08 | 2018-04-08 | .
Apus | 2016-05-31 | 500 TB | - | 2014-01-13 | 2018-01-13 | .
Topolino | . | 24 nodes | - | 2014-04-08 | 2018-04-08 | BILS
Meles | . | 279 TB | - | 2014-01-13 | 2018-01-13 | .
Das | . | 48 TB | 0.07 | 2015-07-01 | 2020-06-30 | New back mount, HP, data network redesign IT 2015/25
Core network | . | 2 switches | 0.22 | 2015-07-01 | 2020-06-30 | Dell, core network, data network redesign IT 2015/50
Irma | . | 250 nodes | 15.8 | 2015-10-01 | 2019-09-30 | Supermicro, data network redesign IT2014/93
Lupus | . | 1 PB Lustre | 2.1 | 2016-03-03 | 2021-03-02 | Dell, data network redesign IT214/92
CEPH | . | 252 TB | 0.35 | 2015-12-14 | 2019-12-13 | Dell, 7 servers, data network redesign IT 2015/84
Bianca | . | 100 nodes | 3.1 | 2016-04-01 | 2020-03-31 | SouthPole, Huawei data network redesign IT 2015/65
Castor | . | 1 PB, 18 servers | 1 | 2016-04-01 | 2020-03-31 | SouthPole, Huawei data network redesign IT 2015/65
Castor, +1 PB | . | 1 PB, 18 servers | 2.3 | 2016-07-01 | 2020-07-31 | SouthPole, Huawei data network redesign IT 2015/65
Grus | . | 1.5 PB, 14 servers | 1.8? | 2016-07-01 | 2020-07-31 | SouthPole, Huawei data network redesign IT 2015/65
Irham | . | . | . | 2016-07-01 | 2024-01-12 | Decommissioned Irma nodes added to Rackham, became r[1001-1072,1179-1250]
Miarka | . | . | . | 2021 | . | .
Rackham | . | . | . | . | . | .
Snowy | . | . | . | . | . | .
Pelle | . | . | . | . | . | .
Maja | . | . | . | . | . | .
Gorilla | . | . | . | . | . | .
+
    +
  • Price is in millions of Swedish kronor (SEK)
  • +
  • 'Start': start of the guarantee
  • +
  • 'End': end of the guarantee
  • +
  • 'data network redesign' is assumed to be the unabbreviated form of dnr
  • +
  • 'processors' is assumed to be the unabbreviated form of procs
  • +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_storage_system/index.html b/cluster_guides/uppmax_storage_system/index.html new file mode 100644 index 000000000..c9038e40e --- /dev/null +++ b/cluster_guides/uppmax_storage_system/index.html @@ -0,0 +1,3111 @@ + + + + + + + + + + + + + + + + + + + UPPMAX storage system - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/uppmax_systems/index.html b/cluster_guides/uppmax_systems/index.html new file mode 100644 index 000000000..6d236da96 --- /dev/null +++ b/cluster_guides/uppmax_systems/index.html @@ -0,0 +1,3449 @@ + + + + + + + + + + + + + + + + + + + UPPMAX systems - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX systems

+

UPPMAX is an organization that provides +HPC infrastructure that is physically located in Uppsala.

+
+Where can I find an overview of UPPMAX? +

One can find an overview of UPPMAX here

+
+

This HPC infrastructure consists of:

+ +

Below these systems are discussed.

+

UPPMAX computing systems

+

Computing systems allow a user to do heavier computational calculations.

+

UPPMAX has, among others, the following clusters:

+
    +
  • Rackham: regular data, general purpose
  • +
  • Snowy: regular data, long runs and GPU:s
  • +
  • Bianca: for sensitive data, general use
  • +
+

A technical summary can be found below.

+
flowchart TD
+    UPPMAX(Which UPPMAX cluster?)
+    Bianca
+    Rackham
+    Snowy
+    is_sensitive[Do you use sensitive data?]
+    is_long[Do you use long runs and/or GPUs?]
+
+    UPPMAX --> is_sensitive
+    is_sensitive --> |yes|Bianca
+    is_sensitive --> |no|is_long
+    is_long --> |no|Rackham
+    is_long --> |yes|Snowy
+

UPPMAX storage systems

+

Storage systems allow a user to store (large amounts of) data, either for active use (i.e. in calculations) or to archive it (cold data).

+

You are not supposed to do calculations on cold data. It is stored on off-load storage, where the file system is much slower. You need to transfer the data to active storage first.

+

The UPPMAX storage systems are:

+
    +
  • Active: Cygnus for Bianca, Crex for Rackham
  • +
  • Off-load: Lutra for Rackham
  • +
+
flowchart TD
+    UPPMAX[Which UPPMAX storage system?]
+    which_cluster[Which UPPMAX cluster?]
+    Cygnus
+    Lutra
+    usage_type{Type of use?}
+
+    UPPMAX-->which_cluster
+    which_cluster-->|Rackham|usage_type
+    which_cluster-->|Bianca|Cygnus
+    usage_type-->|active|Crex
+    usage_type-->|off-load|Lutra
+

See here +for more information.

+

UPPMAX Cloud services

+

See the UPPMAX cloud.

+

Difference between supercomputer and (high-performing) computer cluster

+

A supercomputer, from https://en.wikipedia.org/wiki/File:IBM_Blue_Gene_P_supercomputer.jpg

+

A supercomputer is a machine that is optimized for doing calculations quickly. For example, a calculation to predict tomorrow's weather must not take a week. The image above shows a supercomputer.

+

A computer cluster using some Raspberry Pi's

+

A computer cluster is a machine that is optimized for doing a lot of calculations. The image above shows a home-made computer cluster. This home-made computer cluster may not be suitable for high-performance computing.

+

The Rackham computer cluster

+

The image above shows Rackham, another UPPMAX computer cluster, suitable for high-performance computing. This makes Rackham a high-performance computing (HPC) cluster. Bianca and Rackham are HPC clusters.

+

Restrictions on a computer cluster

+

A computer cluster is a group of computers that can run +many calculations, as requested by multiple people, at the same time.

+

To ensure fair use of this shared resource, regular users +are restricted in some ways:

+
    +
  • Users cannot run calculations directly. + Instead, users need to request either (1) a calculation to be run, + or (2) an interactive node
  • +
+
+Requesting a calculation to run +

Requesting a calculation to run is described +here. +This is done by using the Slurm scheduler.

+
+
+Requesting an interactive node +

Requesting an interactive node is described +here. +This is done by requesting an interactive node +from the Slurm scheduler.

+
+
    +
  • Users cannot install software directly. Instead, users need to use pre-installed software or learn techniques to run custom software anyway
  • +
+
+Using pre-installed software +

Using pre-installed software is described here. +This is done by using the module system.

+
+
+How to run custom software +

One can use Singularity containers +to run software on an HPC cluster.

+
+

These restrictions apply to most general-purpose clusters. +However, Bianca is a sensitive data cluster, to which +more restrictions apply.

+

Restrictions on a sensitive data computer cluster

+

In addition to the general restrictions above, Bianca is also a sensitive data cluster. Its sensitive data must be protected and must remain only on Bianca, which leads to these additional restrictions for users:

+
    +
  • Users have no direct access to the internet. Instead, users can up/download files from/to a special folder.
  • +
+
+File transfer +

Transferring files is described here.

+
+

The goal is to prevent the accidental up/download of sensitive data. +As these up/downloads are monitored, in case of an accident, +the extent of the leak and the person (accidentally) causing it +is known. Identifying a responsible person in case of such an +accident is required by law.

+

What is a computer cluster technically?

+

A computer cluster is a machine that consists of many computers. These computers work together.

+

Each computer of a cluster is called a node.

+

There are three types of nodes:

+
    +
  • login nodes: nodes where a user enters and interacts with the system
  • +
+
+Logging in +

Logging in is described here.

+
+
    +
  • calculation nodes: nodes that do the calculations
  • +
+
+Requesting a calculation to run +

Requesting a calculation to run is part of this course +and is described here. +This is done by using the Slurm scheduler.

+
+
    +
  • interactive nodes: a type of calculation node, where a user can do calculations directly
  • +
+
+Requesting an interactive node +

Requesting an interactive node is part of this course +and is described here. +This is done by requesting an interactive node +from the Slurm scheduler.

+
+

Each node contains several CPU/GPU cores, RAM and local storage space.

+

A user logs in to a login node via the Internet.

+

Summary

+
+

keypoints

+
    +
  • NAISS provides HPC resources for Swedish research.
  • +
  • UPPMAX takes care of the Uppsala HPC facilities
  • +
  • Bianca is an HPC cluster for sensitive data
  • +
  • The restrictions on Bianca follow from Bianca being a shared resource + that uses sensitive data
  • +
+
+

Extra material

+

UPPMAX clusters technical summary

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                     | Rackham         | Snowy                     | Bianca
Purpose              | General-purpose | General-purpose           | Sensitive
# Intel CPU Nodes    | 486+144         | 228                       | 288
# GPU Nodes          | -               | 50, Nvidia T4             | 10, 2x Nvidia A100 each
Cores per node       | 20/16           | 16                        | 16/64
Memory per node      | 128 GB          | 128 GB                    | 128 GB
Fat nodes            | 256 GB & 1 TB   | 256, 512 GB & 4 TB        | 256 & 512 GB
Local disk (scratch) | 2/3 TB          | 4 TB                      | 4 TB
Login nodes          | Yes             | No (reached from Rackham) | Yes (2 cores and 15 GB)
"Home" storage       | Domus           | Domus                     | Castor
"Project" Storage    | Crex, Lutra     | Crex, Lutra               | Castor
+

Detailed overview of the UPPMAX systems

+

+  graph TB
+
+  Node1 -- interactive --> SubGraph2Flow
+  Node1 -- sbatch --> SubGraph2Flow
+  subgraph "Snowy"
+  SubGraph2Flow(calculation nodes)
+        end
+
+        thinlinc -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow
+        terminal -- usr --> Node1
+        terminal -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow
+        Node1 -- usr-sensXXX + 2FA + no VPN ----> SubGraph1Flow
+
+        subgraph "Bianca"
+        SubGraph1Flow(Bianca login) -- usr+passwd --> private(private cluster)
+        private -- interactive --> calcB(calculation nodes)
+        private -- sbatch --> calcB
+        end
+
+        subgraph "Rackham"
+        Node1[Login] -- interactive --> Node2[calculation nodes]
+        Node1 -- sbatch --> Node2
+        end
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/webexport/index.html b/cluster_guides/webexport/index.html new file mode 100644 index 000000000..e540ed993 --- /dev/null +++ b/cluster_guides/webexport/index.html @@ -0,0 +1,3239 @@ + + + + + + + + + + + + + + + + + + + + + + + Webexport - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Webexport guide

+

You can enable webexport by creating a publicly readable folder called webexport in your project directory (/proj/[project id]). The contents of that folder will be accessible through https://export.uppmax.uu.se/[project id]/.

+

This will not work on Bianca for security reasons.

+

Publicly readable folder

+
    +
  • A publicly readable folder has the execute permission set for "other" users.
  • +
  • Run the command chmod o+x webexport to ensure that the webexport directory has the correct permissions (see the sketch below).
  • +
+
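A minimal sketch of the steps above, assuming the hypothetical project naiss2024-1-123 used elsewhere on this page:

# create the folder and give "other" users execute (search) permission
mkdir /proj/naiss2024-1-123/webexport
chmod o+x /proj/naiss2024-1-123/webexport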

Control access

+
    +
  • +

    A subset of .htaccess/.htpasswd functionality is available to control access (see the sketch after this list).

    +
  • +
  • +

    Example:

    +
      +
    • /crex/proj/naiss2024-1-123/webexport/Project_portal/.htaccess
    • +
    • +

      /crex/proj/naiss2024-1-123/Nisse/.htpasswd

      +
    • +
    • +

      Note that you need the full physical /crex/proj... path. This full path is given by the command pwd -P.

      +
    • +
    +
  • +
+
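A minimal sketch of protecting a folder, assuming the supported subset includes the standard Apache basic-auth directives and that the htpasswd tool is available; the paths are the examples from this page and the user nisse is hypothetical:

# write a minimal .htaccess pointing at a password file
cat > /crex/proj/naiss2024-1-123/webexport/Project_portal/.htaccess <<'EOF'
AuthType Basic
AuthName "Project portal"
AuthUserFile /crex/proj/naiss2024-1-123/Nisse/.htpasswd
Require valid-user
EOF
# create the password file with a (hypothetical) user nisse
htpasswd -c /crex/proj/naiss2024-1-123/Nisse/.htpasswd nisse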
+

see also

+

You may want to check the external Easy_webshare_on_UPPMAX while we update this page.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/cluster_guides/wharf/index.html b/cluster_guides/wharf/index.html new file mode 100644 index 000000000..90bfa7dbe --- /dev/null +++ b/cluster_guides/wharf/index.html @@ -0,0 +1,3224 @@ + + + + + + + + + + + + + + + + + + + wharf - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

wharf

+

wharf is a folder on Bianca used for file transfer.

+

Here it is described:

+ +

What is wharf?

+

The wharf is like a "postbox" 📮 for data/file exchange between the Internet-restricted Bianca cluster and the rest of the Internet. This "postbox" is reachable for data transfer from two internal servers: bianca-sftp.uppmax.uu.se and transit.uppmax.uu.se.

+

The wharf location

+

The path to this special folder is:

+
/proj/nobackup/[project_id]/wharf/[user_name]/[user_name]-[project_id]
+
+

where

+ +

For example:

+
/proj/nobackup/sens2023598/wharf/sven/sven-sens2023598
+
+

wharf use

+

To transfer data from/to Bianca, wharf is the folder where files are sent to/from.

+

Do not keep files in wharf, as this folder is connected to the outside +world and hence is a security risk. Instead, move your data to your project folder.
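A minimal sketch of moving a (hypothetical) incoming file out of the wharf, using the example user and project paths shown above:

# move incoming data from the wharf into the project folder (file name is a placeholder)
mv /proj/nobackup/sens2023598/wharf/sven/sven-sens2023598/my_data.csv /proj/nobackup/sens2023598/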

+

You have full access to your wharf and read-only access +to other users' wharf folders in that same project.

+

wharf is only accessible when inside the university networks.

+

Mounting wharf

+

Mounting wharf means that a wharf folder is added to the +filesystem of your local computer, after which you can use +it like any other folder. The data shown in the folder is on Bianca, +not on your local storage.

+

You can mount wharf on your local computer using sshfs when inside the university networks; see the sketch below the table. sshfs is available on most Linux distributions:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Distribution      | Package name
Ubuntu            | sshfs
Fedora            | fuse-sshfs
RHEL7/CentOS7 [1] | fuse-sshfs
RHEL8 [2]         | fuse-sshfs
CentOS8 [3]       | fuse-sshfs
+
    +
  • [1] Enable EPEL repository
  • +
  • [2] Enable codeready-builder repository
  • +
  • [3] Enable powertools repository
  • +
+
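A minimal sketch of mounting, using the example user and project from this page; the exact remote path may differ per setup:

# create a mount point, then mount the wharf over sshfs
mkdir -p ~/wharf
sshfs sven-sens2023598@bianca-sftp.uppmax.uu.se: ~/wharf
# unmount when done
fusermount -u ~/wharf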

UPPMAX does not have sshfs installed for security reasons.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/R_matlab_julia/index.html b/courses_workshops/R_matlab_julia/index.html new file mode 100644 index 000000000..ec1e53f38 --- /dev/null +++ b/courses_workshops/R_matlab_julia/index.html @@ -0,0 +1,3211 @@ + + + + + + + + + + + + + + + + + + + Introduction to running Julia, R, and Matlab in HPC - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Introduction to running Julia, R, and Matlab in HPC

+

Learn how to run R, Matlab, and Julia at Swedish HPC centres. We will show you how to find and load the needed modules, how to write a batch script, as well as how to install and use your own packages, and more. +The course will consist of lectures interspersed with hands-on sessions where you get to try out what you have just learned.

+

We will mainly use Tetralith at NSC for the examples for the course, but there is little difference in how you use the various HPC centres in Sweden and you should have no problems applying the knowledge to the other systems.

+

NOTE: the course will NOT cover the topic of improving your programming skills in R, Matlab, and Julia. Likewise, we will not cover advanced techniques for code optimization.

+

NOTE if you are interested in running Python at Swedish HPC centres, then we recommend the course "Introduction to Python and Using Python in an HPC environment" which will run 24-25 April + 28-29 April. The first day is the introduction to Python and it is possible to just participate that day.

+

Remote/online participation: The course will be completely online and we will use Zoom. More information about connecting and such will be sent to the participants close to the course.

+

Prerequisites: some familiarity with the LINUX command line (recordings from HPC2N's Linux intro here and UPPMAX Linux Intro here and also here), basic R, Matlab, or Julia, depending on which language(s) you are interested in. See below for links to useful material if you need a refresher before the course.

+

Schedule

+

This course will consist of three days (9:00-16:00), one for each language. It is a cooperation between HPC2N, LUNARC, and UPPMAX.

+

Full schedule can be found on the rendered presentations for each course day: https://uppmax.github.io/R-python-julia-matlab-HPC/

+
    +
  • +

    Day 1, Mon. 24. March

    +
      +
    • 9:00 - 16:00 R
    • +
    +
  • +
  • +

    Day 2, Tue. 25. March

    +
      +
    • 9:00 - 16:00 Matlab
    • +
    +
  • +
  • +

    Day 3, Wed. 26. March

    +
      +
    • 9:00 - 16:00 Julia
    • +
    +
  • +
+

Materials

+
Exercises and .rst files can be downloaded from the course's GitHub page: <https://github.com/UPPMAX/R-python-julia-matlab-HPC>
+Rendered presentations can be found here: <https://uppmax.github.io/R-python-julia-matlab-HPC/>
+Recordings are here: TBA
+Q/A document for each day, as PDF: TBA
+
+ +

This is NOT in any way mandatory for participation or part of the course. It is a list of links to useful refresher material for those who would like to read up on Julia/R/Matlab/Linux/etc. before the course.

+
Julia
+    Aalto Univ.: <https://github.com/AaltoRSE/julia-introduction>
+    Software Carpentry: <https://carpentries-incubator.github.io/julia-novice/>
+R
+    Software Carpentry: <https://swcarpentry.github.io/r-novice-gapminder/index.html>
+    Parallel R: <https://github.com/menzzana/parallel_R_course>
+Matlab
+    Software Carpentry: <https://swcarpentry.github.io/matlab-novice-inflammation/>
+    Matlab documentation at MathWorks: <https://se.mathworks.com/help/matlab/index.html>
+Linux intro
+    Linux intro from "Introduction to Kebnekaise": <https://hpc2n.github.io/intro-linux/>  (Recordings)
+    Material contained in the UPPMAX introduction course: <https://www.uu.se/centrum/uppmax/utbildning/kurser-och-workshops/introduktion-till-uppmax>
+Slurm
+    Contained in the "Introduction to Kebnekaise" course: <https://hpc2n.github.io/intro-course/batch/> (Recordings)
+    UPPMAX SLURM guide: <https://docs.uppmax.uu.se/cluster_guides/slurm/>
+    Material contained in the UPPAX intro course: <https://www.uu.se/en/centre/uppmax/study/courses-and-workshops/introduction-to-uppmax>
+
+

Time and Dates: 24-26 March 2025, three days, one for each language. 9:00 - 16:00 each day. The last hour each day will be used for extra time for exercises.

+

Onboarding: Friday, 21. March (1 hour - time to be decided)

+

Location: ONLINE. Zoom link will be sent to participants a few days before the course.

+

Deadline for registration: 17. March 2025

+

Registration from HPC2N page

+

Participation in the course is free.

+

Please make sure you have an account at SUPR as well as at NSC if you want to participate in the hands-on part of the training. There will be a course project on NSC that can be used to run the examples during the hands-on. If you are affiliated with IRF, LTU, UMU, MIUN, or SLU and have an account/project at HPC2N, you can use HPC2N's local cluster if you prefer. Also, if you have an account/project at LUNARC or one at UPPMAX, you may use that instead if you want. If you do not have an account at SUPR and/or UPPMAX/HPC2N/LUNARC/NSC, you will be contacted with further instructions for how to create those. You are STRONGLY encouraged to sign up to SUPR as soon as possible after registering for the course.

+

NOTE:

+
Kebnekaise has become a local resource. Please also read the page about "Kebnekaise will be retired as a national resource". HPC2N accounts are ONLY meant for people who are at Umeå University, one of HPC2N's partner sites (IRF, LTU, MIUN, SLU), or are in a research group with a PI at one of those.
+Cosmos (LUNARC) is also a local resource, for those at Lund University.
+UPPMAX accounts are only for local Uppsala people.
+Everyone else must use NSC for the course.
+
+

Course project: As part of the hands-on, you may be given temporary access to a course project, which will be used for running the hands-on examples. There are some policies regarding this, that we ask that you follow:

+
You may be given access to the project before the course; please do not use the allocation for running your own codes. Usage of the project before the course means the priority of jobs submitted to it goes down, diminishing the opportunity for you and your fellow participants to run the examples during the course. You can read more detailed information about the job policies of NSC here and NSC usage rules here.
+The course project will be open 1-2 weeks after the course, giving the participants the opportunity to test run examples and shorter codes related to the course. During this time, we ask that you only use it for running course related jobs. Use your own discretion, but it could be: (modified) examples from the hands-on, short personal codes that have been modified to test things learned at the course, etc.
+Anyone found to be misusing the course project, using up large amounts of the allocation for their own production runs, will be removed from the course project.
+You will likely also be given access to a storage area connected to the compute project. Any data you store there should be course-related and if you wish to save it you should copy it to somewhere else soon after the course as it will be deleted about a month later.
+
+

The course uses compute resources provided by the National Academic Infrastructure for Supercomputing in Sweden (NAISS) at NSC partially funded by the Swedish Research Council through grant agreement no. 2022-06725.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/awk/index.html b/courses_workshops/awk/index.html new file mode 100644 index 000000000..d74271ab6 --- /dev/null +++ b/courses_workshops/awk/index.html @@ -0,0 +1,3255 @@ + + + + + + + + + + + + + + + + + + + Awk workshop - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Awk workshop

+

AWK is an interpreted programming language designed for text processing and typically used as a data extraction and reporting tool.

+

This two-day workshop aims to promote and demonstrate the flexibility of the tool in cases where the overhead of more sophisticated approaches and programming languages is not worth the bother.

+

Learn how to

+
    +
  • use Awk as an advanced grep command, capable of arithmetic selection rules with control over the content of the matched lines (see the sketch after this list)
  • +
  • perform simple conversions, analysis or filter you data on the fly making it easy to plot or read in your favorite research tool
  • +
  • handle and take advantage of data split over multiple-file data sets.
  • +
  • use Awk as a simple function or data generator
  • +
  • perform simple sanity checks on your results
  • +
+
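As a small taste of the "advanced grep" use case above, a minimal sketch assuming a hypothetical whitespace-separated file data.txt:

# print the first and third columns of lines whose third column exceeds 100
awk '$3 > 100 { print $1, $3 }' data.txt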

Awk for bioinformaticians

+

Use what you learn and dive into the basic concepts of bioinformatics with simple exercises on typical scientific problems and tasks.

+
+

Venue and registration:

+

Date: 16 and 17 January, 2025
+Time: 9:15 - 12:00 and 13:15 -16:00
+Location: Zoom: link will be sent to applicants
+Application: form.

+
+

Schedule

+
+

1-st day 9:15 - 12:00

+

Seminar session

+
    +
  • Examples of typical problems suitable for Awk “treatment”
  • +
  • Introduction to the basics of Awk scripting language
  • +
  • Solving interactively simple problems
  • +
+

1-st day lunch break

+

Exercises 13:15 -16:00

+
    +
  • Solving interactively the exercise problems
  • +
+

2-nd day 9:15 - 12:00

+ +

2-nd day lunch break

+

Walk-through session on various topics:

+
    +
  • Awk parsing “simultaneously” multiple input files
  • +
  • Multiple input files - second approach scenario will be discussed.
  • +
  • How to trick awk into accepting options on the command line like a regular program, i.e. $ script.awk filename parameter1 parameter2 link
  • +
  • Declaring and calling functions in awk - link
  • +
  • Input/output to/from an external programs
  • +
  • Learn how to send input to an external program (might be based on your data) and read the result back - link
  • +
  • Handy tips: awk oneliners use with Vim, gnuplot…
  • +
+

Also: Suggest topic for discussion or see recently suggested topics.

+

Prerequisites

+
+

MacOS

+

The system-provided awk version will work for most of the examples during the workshop, with a few exceptions, which are noted in the online material.

+

Tilde ~ sign on Mac with Swedish keyboard layout - Alt + ^

+

Linux

+

Several distributions have other awk flavors installed by default. The easiest fix is to install the GNU version gawk, e.g. for Ubuntu: sudo apt install gawk

+

Windows 10/11

+
    +
  • Ubuntu for Windows 10 - it is better to read from the source, although it might not be the easiest tutorial. In my experience, this is the best Linux environment without virtualization.
  • +
  • MobaXterm use the internal package manager to install gawk. The default is provided by Busybox and is not enough for the purpose of the workshop.
  • +
+

Linux computer center

+
    +
  • Just login to your account and use the provided awk - any version newer than 4 will work.
  • +
+
rackham3:[~] awk -V
GNU Awk 4.0.2
Copyright (C) 1989, 1991-2012 Free Software Foundation.
+
+

Virtual Linux Machine

+

Just follow some tutorial on how to set up and use a virtual Linux environment.

+ +
+Feedback from previous workshops + +
+

Contacts for the course

+
+

Pavlin Mitev
+Jonas Söderberg
+Lars Eklund
+Richel Bilderbeek
+UPPMAX

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/bianca_intro/index.html b/courses_workshops/bianca_intro/index.html new file mode 100644 index 000000000..fd2dcb0a0 --- /dev/null +++ b/courses_workshops/bianca_intro/index.html @@ -0,0 +1,3140 @@ + + + + + + + + + + + + + + + + + + + Introduction to Bianca: Handling Sensitive Research Data - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Introduction to Bianca: Handling Sensitive Research Data

+

Are you just beginning to work with sensitive data in your research? If yes, welcome to a 1-day introduction to handling sensitive data on the UPPMAX cluster, Bianca. We will tell you about NAISS-SENS, how to login to Bianca, transfer files via wharf, basics of the SLURM workload manager and the module system.

+

This workshop is intended for beginner users of Bianca.

+

You do not need to be a member of a NAISS-SENS project in order to join the workshop. A SUPR course project will be available to all participants. The workshop will consist of both lectures and exercise sessions.

+

Prerequisites: none.

+

When: Wednesday, March 19, 2025.

+

Time: 09:00 - 12:00, and 13:00 - 16:00.

+

Where: online via Zoom. Connection details will be sent to registered participants.

+

Login help session: TBD

+

Registration form

+

Content

+
    +
  • Introduction
  • +
  • Intro to NAISS-Sens
  • +
  • Login: ThinLinc
  • +
  • Command line intro specific to Bianca
  • +
  • Module system
  • +
  • Intro to transferring files to and from Bianca
  • +
  • Compute nodes and slurm
  • +
  • Summary
  • +
  • Q/A
  • +
+

Workshop material

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/courses_workshops/index.html b/courses_workshops/courses_workshops/index.html new file mode 100644 index 000000000..5406ad13f --- /dev/null +++ b/courses_workshops/courses_workshops/index.html @@ -0,0 +1,3280 @@ + + + + + + + + + + + + + + + + + + + + + Courses and workshops - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Courses and workshops

+

At UPPMAX, we teach by providing workshops and courses. This page gives an overview of these.

+

Course dates are (or should be) provided at each course's website.

+

UPPMAX Local

+

The courses on how to use our local clusters, +such as Rackham and Snowy.

+ +

NAISS-Sens

+

The courses on how to use Bianca, +a NAISS HPC cluster for sensitive data.

+ +

NAISS centre agnostic

+

UPPMAX is part of NAISS and we do teach things that apply +to all NAISS HPC clusters.

+

Getting started

+ +

Programming

+

Python

+ +

Other

+ +

Other centers

+ + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/img/rackham_logged_in.png b/courses_workshops/img/rackham_logged_in.png new file mode 100644 index 000000000..4505e44f7 Binary files /dev/null and b/courses_workshops/img/rackham_logged_in.png differ diff --git a/courses_workshops/img/supr_logged_in.png b/courses_workshops/img/supr_logged_in.png new file mode 100644 index 000000000..7b443ce97 Binary files /dev/null and b/courses_workshops/img/supr_logged_in.png differ diff --git a/courses_workshops/intro_to_python/index.html b/courses_workshops/intro_to_python/index.html new file mode 100644 index 000000000..2ddb308f4 --- /dev/null +++ b/courses_workshops/intro_to_python/index.html @@ -0,0 +1,3273 @@ + + + + + + + + + + + + + + + + + + + Introduction to Python - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Introduction to Python

+
+Announcement text +
+

Great course. Now, Python is not scary anymore.

+

A learner from this course (source)

+
+

This 1-day course helps you get started with Python, by working through an online and free book. We make use of HPC clusters to write and run Python code. The pace of this highly interactive course is set by the majority of learners, ensuring that any complete beginner has enough time for exercises. At the end of the day, you should feel comfortable with the basics of Python and be familiar with a book on Python to help you on your next steps.

+ +
+
+Registration form text +

Intro to Python 2025-03-07

+

This is the registration form for the UPPMAX course 'Intro to Python', +https://docs.uppmax.uu.se/courses_workshops/intro_to_python/

+

What is your email address?

+
+
+

Great course. Now, Python is not scary anymore.

+

A learner from this course (source)

+
+

This 1-day course helps you get started with Python, by working through an online and free book. We make use of the UPPMAX HPC cluster to write and run Python code, but you may use a different machine if you prefer. The pace of this highly interactive course is set by the majority of learners, ensuring that any complete beginner has enough time for exercises. At the end of the day, you should feel comfortable with the basics of Python and be familiar with a book on Python to help you on your next steps.

+

You will:

+
    +
  • Feel comfortable with learning Python
  • +
  • Feel comfortable using an online and free book on Python
  • +
  • Write Python code on an HPC cluster
  • +
  • Run Python scripts on an HPC cluster
  • +
+

Practical matters:

+ +

Before the course, you must have done these four things:

+
    +
  • +

    Prerequisite 1/4: You have registered at our registration form here

    +
  • +
  • +

    Prerequisite 2/4: A user account on a Swedish academic HPC cluster

    +
  • +
+
+How can I check if I have this? +

When you can login at https://supr.naiss.se/.

+

It should look similar to this:

+

A user that logged into SUPR

+
+
+How to get this? +

Register at https://supr.naiss.se/person/register/.

+
+
+What if I have a problem here? +

Contact richel.bilderbeek@uppmax.uu.se

+
+
    +
  • Prerequisite 3/4: Be able to login to an HPC cluster using SSH
  • +
+
+How can I check if I have this? +

This depends on the HPC cluster you are using. For UPPMAX's Rackham, it looks similar to this:

+

Logged into Rackham

+
+
+Is it OK if I can login using other methods? +

Probably: yes

+
    +
  • Using a website: yes
  • +
  • Using a local ThinLinc client: yes
  • +
+
+
+How to get this? +

Follow the instructions of your favorite HPC center or the UPPMAX instructions

+
+
+What if I have a problem here? +

Contact richel.bilderbeek@uppmax.uu.se

+
+
    +
  • Prerequisite 4/4: have a good Zoom setup
  • +
+
+How can I check if I have this? +
    +
  • You are in a room where you can talk
  • +
  • You talk into a (standalone or headset) microphone
  • +
  • Others can clearly hear you when you talk
  • +
  • Others can see you
  • +
  • You can hear others clearly when they talk
  • +
+
+
+How to get this? +
    +
  • Find/schedule/book a room where you can talk
  • +
  • Buy a simple headset
  • +
+
+
+What if I don't have this? Is that OK? +

No.

+

You will feel left out, as the course is highly interactive. It would be weird to the other learners.

+
+
+What if I have social anxiety? +

Sorry to hear that. In this course, it is OK to give a wrong +answer or to say 'I do not know'. This is what a former +learner had to say on this:

+
+

As a learner, you do not want to be berated when giving an answer. +Richel tries to gently deal with a wrong answer and he does this great

+

A learner from this course (source)

+
+

You are welcome to try and leave anytime you want. +The course material is made for self-study too, with videos +for all exercises. +Do fill in the evaluation when you leave early :-)

+
+

Coordinators

+
    +
  • ?
  • +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/naiss_transfer/index.html b/courses_workshops/naiss_transfer/index.html new file mode 100644 index 000000000..069137ee8 --- /dev/null +++ b/courses_workshops/naiss_transfer/index.html @@ -0,0 +1,3181 @@ + + + + + + + + + + + + + + + + + + + Transferring Files to/from HPC Clusters - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Transferring Files to/from HPC Clusters

+

In this 3-hour workshop you learn to transfer files to or from Swedish academic HPC clusters. We will cover graphical as well as terminal tools and you will work highly interactively. At the end of the day, you should be comfortable in transferring files between local computer and a cluster and cross-clusters, and choosing the right tool for your use cases.

+

The workshop is intended for beginner users with some Linux experience; see the course link below. You do not need to be a member of a NAISS project in order to join the workshop. A course project on one of the NAISS clusters will be available to participants.

+

Prerequisites

+ +

Preliminary schedule overview

+
    +
  • FileZilla
  • +
  • Log in with terminal and file transfer using rsync
  • +
  • File transfer using scp and sftp
  • +
+

Coming course instance

+
    +
  • When: Fri 7 March, 2025, 9.00-12.00
  • +
  • +

    Where: Online via Zoom

    +
  • +
  • +

    Registration

    +
  • +
  • +

    Course material: TBA

    +
  • +
+

Coming course instances

+
    +
  • Friday May 16th 9:00-12:00 (week 20)
  • +
  • Friday Sep 5th (week 36)
  • +
  • Friday 14 Nov (week 46)
  • +
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/courses_workshops/uppmax_intro_course/index.html b/courses_workshops/uppmax_intro_course/index.html new file mode 100644 index 000000000..63363e678 --- /dev/null +++ b/courses_workshops/uppmax_intro_course/index.html @@ -0,0 +1,3151 @@ + + + + + + + + + + + + + + + + + + + Introduction to Linux and UPPMAX - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Introduction to Linux and UPPMAX

+

Overview and Schedule

+

UPPMAX application experts want to share their skills in a 3-day series of lectures. We will help you move from being a Linux novice to an UPPMAX expert. If you already have the fundamentals down, you are still sure to enjoy the tips and tricks in the later parts of the course. The lectures covering Linux and bash scripting are cluster-agnostic and may be attended by non-UPPMAX users as well. It is possible to sign up only for the lectures that are interesting to you.

+

When: February 10-12, 2025.

+

Where: online via Zoom.

+

Registration form

+

Schedule

+ + + + + + + + + + + + + + + + + + + + + + + +
          | Monday, February 10 | Tuesday, February 11 | Wednesday, February 12
Morning   | Intro to Linux and UPPMAX (Richèl Bilderbeek) | Linux II (Douglas Scofield) | Bash Scripting (Douglas Scofield)
Afternoon | Intro to UPPMAX (Richèl Bilderbeek) | Linux III (Douglas Scofield) | Slurm at UPPMAX (Diana Iusan)
+

The lectures are scheduled 09:00 to 12:00 and 13:00 to 16:00 daily.

+

Startup instructions to course participants

+

Approximately two weeks before the course starts, you will receive a set of instructions for creating an account and joining the course project. It is important that you complete these steps well in advance of the course.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/databases/1000-genome_project/index.html b/databases/1000-genome_project/index.html new file mode 100644 index 000000000..66c04b265 --- /dev/null +++ b/databases/1000-genome_project/index.html @@ -0,0 +1,3130 @@ + + + + + + + + + + + + + + + + + + + + + + + 1000-genome project - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

1000 genomes project

+

The 1000-genome project is an international collaboration to sequence the genomes of a large number of people. The complete archive is available from NCBI and EBI but downloading this massive quantity of next-gen data is time- and resource-consuming. UPPMAX now has a local copy of the sequencing and index files (BAM, BAI and BAS) as a shared resource.

+

The main archive is stored at /sw/data/KGP/central. Within this folder, "low" holds the primary dataset with one individual per folder (e.g., "HG00096", "NA11831") holding data files for each sequencing technology applied. In the main folder, "high" holds the high-coverage data for a subset of the individuals.
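For example, a minimal sketch of inspecting the data available for one of the individuals mentioned above:

# list the data files available for individual HG00096
ls /sw/data/KGP/central/low/HG00096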

+

One level up in the file system, /sw/data/KGP/regional holds sequence data for some individual countries outside the 1000-genome project. So far, very little data has been stored but this may be expanded.

+

Users interested in any of this data should request membership in the "KGP" group (via support@uppmax.uu.se). This requirement is not intended to restrict the resource in any way, but makes it easier to inform interested users of possible changes. Considering the large storage space used, it is possible that the data would need to be reorganized or possibly even reduced in the future, depending of course on the perceived need for the resource by the members of the KGP group.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/databases/blast/index.html b/databases/blast/index.html new file mode 100644 index 000000000..a8a5b5cae --- /dev/null +++ b/databases/blast/index.html @@ -0,0 +1,3447 @@ + + + + + + + + + + + + + + + + + + + + + + + BLAST databases available locally - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Blast databases available locally

+

Many pipelines for annotation or assembly comparison involve Blast. Several Blast versions are available as modules, for example:

+
    +
  • blast/2.12.0+, etc. : the Blast+ suites (blastp, tblastn, etc.), recommended
  • +
  • diamond/2.0.14 : the DIAMOND protein aligner, recommended for protein databases. See UPPMAX's DIAMOND database webpage for more information.
  • +
  • blast/2.2.26, etc. : 'legacy' Blast (blastall, megablast, etc)
  • +
+

Use module spider blast to see available versions. As for all bioinformatics tools at Uppmax, module load bioinfo-tools is required before the blast modules are available.

+

Uppmax maintains local copies of many Blast databases, including many available at NCBI:

+
    +
  • ftp://ftp.ncbi.nih.gov/blast/db/README
  • +
  • ftp://ftp.ncbi.nlm.nih.gov/blast/documents/blastdb.html
  • +
  • https://www.ncbi.nlm.nih.gov/books/NBK62345/
  • +
  • https://ncbiinsights.ncbi.nlm.nih.gov/2020/02/21/rrna-databases/
  • +
  • https://www.ncbi.nlm.nih.gov/sars-cov-2/
  • +
  • https://www.ncbi.nlm.nih.gov/refseq/refseq_select/
  • +
  • https://blast.ncbi.nlm.nih.gov/smartblast/smartBlast.cgi?CMD=Web&PAGE_TYPE=BlastDocs#searchSets
  • +
+

as well as several UniProt databases.

+

Note that:

+
The local UPPMAX copies are found at /sw/data/blast_databases
+Doing module load blast_databases sets the environment variable BLASTDB to this directory; this is loaded as a prerequisite when loading any blast modules
+New versions are installed the first day of each month at 00.01 from local copies updated the 28th of the previous month beginning at 00.01
+When new versions are installed, the directory containing the previous versions is renamed to blast_databases_old
+blast_databases_old is deleted the second day of each month at 00.01
+
+

These databases use the "v5" format, which includes rich taxonomic information with sequences, and will only work with the Blast tools from the module blast/2.8.0+ and later. Earlier module versions can still be used, but you will need to provide/build your own databases. NCBI no longer updates databases with the older "v4" databases as of February 2020, and they have been deleted from UPPMAX. The final updates of these databases (again, as of this writing nearly two years old) are available from NCBI over FTP at ftp://ftp.ncbi.nlm.nih.gov/blast/db/v4.

+

Each NCBI-hosted database also includes a JSON file containing additional metadata for that particular database. These are found in /sw/data/blast_databases/ and are named databasename*.json. The exact name varies based on the format of the database. For example, the contents of the JSON file for the nr database can be see by running

+
cat /sw/data/blast_databases/nr*.json
+
+

The Blast databases available at UPPMAX are:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Source | Notes
16S_ribosomal_RNA | nucleotide | NCBI | 16S ribosomal RNA (Bacteria and Archaea type strains)
18S_fungal_sequences | nucleotide | NCBI | 18S ribosomal RNA sequences (SSU) from Fungi type and reference material (BioProject PRJNA39195)
28S_fungal_sequences | nucleotide | NCBI | 28S ribosomal RNA sequences (LSU) from Fungi type and reference material (BioProject PRJNA51803)
Betacoronavirus | nucleotide | NCBI | Betacoronavirus nucleotide sequences
cdd_delta | protein | NCBI | Conserved domain database for use with delta-blast
env_nr | protein | NCBI | Protein sequences for metagenomes (EXCLUDED from nr)
env_nt | nucleotide | NCBI | Nucleotide sequences for metagenomes
human_genome | nucleotide | NCBI | Current RefSeq human genome assembly with various database masking
ITS_eukaryote_sequences | nucleotide | NCBI | Internal transcribed spacer region (ITS) for eukaryotic sequences
ITS_RefSeq_Fungi | nucleotide | NCBI | Internal transcribed spacer region (ITS) from Fungi type and reference material (BioProject PRJNA177353)
landmark | protein | NCBI | Proteomes of 27 model organisms. The landmark database includes complete proteomes from a few selected representative genomes spanning a wide taxonomic range, the main database used by the SmartBLAST services.
LSU_eukaryote_rRNA | nucleotide | NCBI | Large subunit ribosomal RNA sequences for eukaryotic sequences
LSU_prokaryote_rRNA | nucleotide | NCBI | Large subunit ribosomal RNA sequences for prokaryotic sequences
mito | nucleotide | NCBI | NCBI Genomic Mitochondrial Reference Sequences
mouse_genome | nucleotide | NCBI | Current RefSeq mouse genome assembly with various database masking
nr | protein | NCBI | Non-redundant protein sequences from GenPept, Swissprot, PIR, PDF, PDB, and NCBI RefSeq
nt | nucleotide | NCBI | Partially non-redundant nucleotide sequences from all traditional divisions of GenBank, EMBL, and DDBJ
pataa | protein | NCBI | Patent protein sequences
patnt | nucleotide | NCBI | Patent nucleotide sequences. Both patent databases are directly from the USPTO, or from the EPO/JPO via EMBL/DDBJ
pdbaa | protein | NCBI | Sequences for the protein structure from the Protein Data Bank
pdbnt | nucleotide | NCBI | Sequences for the nucleotide structure from the Protein Data Bank. They are NOT the protein coding sequences for the corresponding pdbaa entries.
ref_euk_rep_genomes | nucleotide | NCBI | Refseq Representative Eukaryotic genomes (1000+ organisms)
ref_prok_rep_genomes | nucleotide | NCBI | Refseq Representative Prokaryotic genomes (5700+ organisms)
ref_viroid_rep_genomes | nucleotide | NCBI | Refseq Representative Viroid genomes (46 organisms)
ref_viruses_rep_genomes | nucleotide | NCBI | Refseq Representative Virus genomes (9000+ organisms)
refseq_protein | protein | NCBI | NCBI protein reference sequences
refseq_rna | nucleotide | NCBI | NCBI Transcript reference sequences
refseq_select_prot | protein | NCBI | NCBI RefSeq protein sequences from human, mouse, and prokaryotes, restricted to the RefSeq Select set of proteins. RefSeq Select includes one representative protein per protein-coding gene for human and mouse, and RefSeq proteins annotated on reference and representative genomes for prokaryotes
refseq_select_rna | nucleotide | NCBI | NCBI RefSeq transcript sequences from human and mouse, restricted to the RefSeq Select set with one representative transcript per protein-coding gene
SSU_eukaryote_rRNA | nucleotide | NCBI | Small subunit ribosomal RNA sequences for eukaryotic sequences
swissprot | protein | NCBI | Swiss-Prot sequence database (last major update)
tsa_nr | protein | NCBI | Protein sequences from the Transcriptome Shotgun Assembly. Its entries are EXCLUDED from the nr database.
tsa_nt | nucleotide | NCBI | A database with earlier non-project based Transcriptome Shotgun Assembly (TSA) entries. Project-based TSA entries are NOT included. Entries are EXCLUDED from the nt database.
uniprot_sprot | protein | UniProt | Swiss-Prot high quality manually annotated and non-redundant protein sequence database
uniprot_trembl | protein | UniProt | TrEMBL high quality but unreviewed protein sequence database
uniprot_sptrembl | protein | | uniprot_sprot and uniprot_trembl combined
uniprot_all | protein | | alias for uniprot_sptrembl
uniprot_all.fasta | protein | | alias for uniprot_sptrembl
uniprot_sprot_varsplic | protein | UniProt | UniProt canonical and isoform sequences (see link)
uniprot_uniref50 | protein | UniProt | Clustered sets of 50%-similar protein sequences (see link)
uniprot_uniref90 | protein | UniProt | Clustered sets of 90%-similar protein sequences (see link)
uniprot_uniref100 | protein | UniProt | Clustered sets of identical protein sequences (see link)
UniVec | nucleotide | UniVec | Sequences commonly attached to cDNA/genomic DNA during the cloning process
UniVec_Core | nucleotide | UniVec | A subset of UniVec chosen to minimise false positives
+

Additionally, taxdb.btd and taxdb.bti are downloaded, which provide additional taxonomy information for these databases. Local copies of the NCBI Taxonomy databases are also available; further details are available on a separate page.

+

For UniVec and UniVec_Core, Fasta-format files containing the vector sequences are also available with the given names (e.g., /sw/data/uppnex/blast_databases/UniVec), alongside the Blast-format databases built from the same Fasta files.

+

The exact times all databases were updated are provided by database.timestamp files located in the database directory.

Databases are available automatically after loading any blast module

+

When any of the blast modules is loaded, the BLASTDB environment variable is set to the location of the local database copies (/sw/data/uppnex/blast_databases). The various Blast tools can use this variable to find the locations of databases, so that only the name needs to be specified.

+
module load bioinfo-tools blast/2.7.1+
blastp -db nr -query input.fasta
+
+

After loading the blast/2.7.1+ module, specifying blastp -db nr results in blastp searching the local copy of nr, because the BLASTDB environment variable is set when the module is loaded. Similarly, each of these would result in searching the local copy of the given database:

+
blastp -db pdbaa ...
blastp -db uniprot_sprot ...
blastp -db uniprot_uniref90 ...
blastn -db nt ...
blastn -db refseq_genomic ...
+
+

WGS and SRA sequence databases are not included

+

The NCBI Whole-Genome Shotgun (WGS) database is not available locally. NCBI provides special versions of Blast and other tools that can be used to search the remote versions of WGS and the Sequence Read Archive (SRA).

+

These special Blast versions and other tools are part of NCBI's SRA Tools, which is available at UPPMAX as the sratools module. We have also included auxiliary NCBI scripts in the sratools module to convert taxonomic IDs to WGS and SRA identifiers.

+
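As a sketch, assuming the sratools module provides NCBI's SRA-enabled Blast binaries (such as blastn_vdb), a remote search of an SRA run accession could look like:

module load bioinfo-tools sratools
blastn_vdb -db "SRR000001" -query input.fasta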

Note that NCBI's TSA database is available at UPPMAX; just use the database name tsa_nr or tsa_nt.

diff --git a/databases/diamond/index.html b/databases/diamond/index.html

DIAMOND protein alignment databases

+

The DIAMOND protein aligner is a recent tool offering much faster alignment of protein sequences against reference databases (100× to 1000× faster than Blast). On UPPMAX, DIAMOND is available by loading the diamond module; the most recent installed version as of this writing is diamond/2.0.14.

+

As with the BLAST databases, UPPMAX provides several pre-built databases suitable for direct use with the --db flag to diamond, and also runs diamond prepdb on each of its downloaded BLAST protein databases whenever they are installed. The BLAST databases are updated according to the schedule given on their webpage. The diamond-format NCBI protein databases are updated once a month.

+

For each of the databases listed below, the method of versioning is indicated. To determine the version at UPPMAX, examine the path given below with the database name removed from the end; latest is a symbolic link that points to a directory named after the version of the most recent update. Old database versions are removed after updates, so please use latest rather than addressing a database version directly.

+

Each of the database locations below is also available in the indicated environment variable, which is set when any version of the diamond module is loaded. These are simple to use; for example, to search nr:

+
diamond --db $DIAMOND_NR ...
+
+

NCBI BLAST Protein Databases

+

Whenever the BLAST databases are updated and installed, diamond prepdb is run on each of the protein-format databases so that they can be searched directly by diamond. See the BLAST databases webpage for a description of these.

+

To search any of them using diamond, load the blast_databases/latest module. This defines the environment variable BLASTDB, which contains the directory holding these databases. Once this module is loaded, you can run diamond on any of the protein databases. For example:

+
diamond --db $BLASTDB/nr ...
diamond --db $BLASTDB/cdd_delta ...
diamond --db $BLASTDB/swissprot ...
diamond --db $BLASTDB/pdbaa ...
+
+

According to DIAMOND's developer, these are faster to load than DIAMOND's own .dmnd-format databases. So, you may want to load the blast_databases/latest data module and use --db $BLASTDB/nr for your NCBI nr searches, for example, instead of --db $DIAMOND_NR.

+
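For example, a complete command line following that advice could look as below (a sketch; diamond searches use a subcommand such as blastp, and proteins.fasta and nr_hits.tsv are placeholder file names):

module load bioinfo-tools diamond blast_databases/latest
diamond blastp --db $BLASTDB/nr --query proteins.fasta --out nr_hits.tsv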

Diamond-format NCBI Protein Databases

+

These databases are downloaded from ftp://ftp.ncbi.nlm.nih.gov/blast/db/FASTA. They are updated frequently at NCBI, so they are versioned here by the monthly download date. There is no longer a separate FASTA version of env_nr, so its Blast database is downloaded from ftp://ftp.ncbi.nlm.nih.gov/blast/db and FASTA sequences are extracted using blastdbcmd -entry all from module blast/2.12.0+.
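For reference, a minimal sketch of that extraction step (assuming the blast/2.12.0+ module is loaded and the downloaded env_nr Blast database files are in the current directory):

blastdbcmd -db env_nr -entry all -out env_nr.fasta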

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| nr | DIAMOND_NR | /sw/data/diamond_databases/Blast/latest/nr |
| env_nr | DIAMOND_ENV_NR | /sw/data/diamond_databases/Blast/latest/env_nr |
| swissprot | DIAMOND_SWISSPROT | /sw/data/diamond_databases/Blast/latest/swissprot |
| pdbaa | DIAMOND_PDBAA | /sw/data/diamond_databases/Blast/latest/pdbaa |
+

NCBI RefSeq Proteins

+

RefSeq protein databases are downloaded from ftp://ftp.ncbi.nlm.nih.gov/refseq/release/complete/, with an update occurring if there is a new release as indicated by the contents of ftp://ftp.ncbi.nlm.nih.gov/refseq/release/RELEASE_NUMBER.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| complete.nonredundant_protein.protein | DIAMOND_REFSEQ_NONREDUNDANT | /sw/data/diamond_databases/RefSeq/latest/complete.nonredundant_protein.protein |
| complete.protein | DIAMOND_REFSEQ | /sw/data/diamond_databases/RefSeq/latest/complete.protein |
+

UniRef90

+

The UniRef90 protein database is downloaded as Fasta from the ExPASy mirror at ftp://ftp.expasy.org/databases/uniprot/current_release/uniref/uniref90/, with an update occurring if there is a new version as indicated by the <version> tag in the XML description available at ftp://ftp.expasy.org/databases/uniprot/current_release/uniref/uniref90/RELEASE.metalink.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| uniref90 | DIAMOND_UNIREF90 | /sw/data/diamond_databases/UniRef90/latest/uniref90 |
+

UniProt Reference Proteomes

+

The UniProt Reference Proteomes protein database is downloaded as Fasta from the ExPASy mirror at ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/reference_proteomes, with an update occurring if there is a new version as indicated by the <version> tag in the XML description available at ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/reference_proteomes/RELEASE.metalink. If there is a new release, then the file Reference_Proteomes_RELEASE.tar.gz is downloaded, with RELEASE replaced by the release number. The reference_proteomes.dmnd database is created from this file using the protocol described after the table.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| UniProt Reference Proteomes | DIAMOND_REFERENCE_PROTEOMES | /sw/data/diamond_databases/reference_proteomes/latest/reference_proteomes |
+

The reference_proteomes.dmnd database is created using the following protocol, adapted from the BlobToolKit. It uses UPPMAX's most recently downloaded NCBI taxonomy database for its taxonomic metadata.

+
module load bioinfo-tools
module load diamond/2.0.14
module load ncbi_taxonomy/latest
+
+

and then, after downloading the release tarball:

+
# unpack the downloaded release
tar xf Reference_Proteomes_RELEASE.tar.gz
# concatenate all per-proteome protein Fasta files, skipping DNA and 'additional' sets
touch reference_proteomes.fasta.gz
find . -mindepth 2 | grep "fasta.gz" | grep -v 'DNA' | grep -v 'additional' | xargs cat >> reference_proteomes.fasta.gz
# build an accession-to-TaxID map from the idmapping files
printf "accession\taccession.version\ttaxid\tgi\n" > reference_proteomes.taxid_map
zcat */*/*.idmapping.gz | grep "NCBI_TaxID" | awk '{print $1 "\t" $1 "\t" $3 "\t" 0}' >> reference_proteomes.taxid_map
# create the DIAMOND database with taxonomy support
diamond makedb --db reference_proteomes.dmnd --in reference_proteomes.fasta.gz --threads 10 --taxonmap reference_proteomes.taxid_map --taxonnames $NCBI_TAXONOMY_ROOT/names.dmp --taxonnodes $NCBI_TAXONOMY_ROOT/nodes.dmp
+
diff --git a/databases/ncbi/index.html b/databases/ncbi/index.html

NCBI taxonomy databases

+

Uppmax maintains local copies of the full set of NCBI Taxonomy databases. Note that:

+
  • The local copies are found at /sw/data/ncbi_taxonomy/latest
  • The data module ncbi_taxonomy/latest defines the environment variable NCBI_TAXONOMY_ROOT to this location. We recommend loading this module and using this environment variable to access these data (see the example after this list).
  • This location also contains the subdirectories new_taxdump, accession2taxid and biocollections containing those databases; see the tables below for their contents
  • latest is a symbolic link to a directory named from the date of the most recent update
  • There is also a subdirectory download containing the files as downloaded from NCBI
  • The installation of new versions begins each Sunday at 00:10. The update may take several minutes up to an hour, depending on network speeds
  • When new versions are successfully installed, the latest/ symbolic link is updated to point to the new dated directory
  • The previous versions of the taxonomy databases are removed when the new versions have completed installation
+
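A minimal example of loading the data module and using the environment variable (a sketch; 9606, the human TaxID, is just an illustration):

module load bioinfo-tools ncbi_taxonomy/latest
ls $NCBI_TAXONOMY_ROOT
grep -w "9606" $NCBI_TAXONOMY_ROOT/names.dmp | head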

See the links for each database for specifics on file format and contents. Many tools know how to make use of these databases; follow each tool's specific instructions. The files can be found in the indicated directories.

+

The databases available within /sw/data/ncbi_taxonomy/latest are below. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| taxdump | NCBI | NCBI taxonomic database, in multiple .dmp files (see taxdump_readme.txt or link) |
| taxcat | NCBI | NCBI taxonomic categories, in categories.dmp (see taxcat_readme.txt or link) |
| taxdump_readme.txt | NCBI | NCBI taxdump file description |
| taxcat_readme.txt | NCBI | NCBI taxcat file description |
| gi_taxid_nucl.dmp | NCBI | Mappings of nucleotide GI to TaxID (DEPRECATED) |
| gi_taxid_prot.dmp | NCBI | Mappings of protein GI to TaxID (DEPRECATED) |
+

The databases available within /sw/data/ncbi_taxonomy/latest/new_taxdump are below. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| new_taxdump | NCBI | NCBI new-format taxonomic database, in multiple .dmp files (see this taxdump_readme.txt or link) |
| taxdump_readme.txt | NCBI | NCBI new-format taxonomic database file description |
+

The databases available within /sw/data/ncbi_taxonomy/latest/accession2taxid are below. The dead_ files contain accession-to-TaxID mappings for dead (suppressed or withdrawn) sequence records. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| nucl_wgs.accession2taxid | NCBI | TaxID mapping for nucleotide records of type WGS or TSA |
| nucl_gb.accession2taxid | NCBI | TaxID mapping for nucleotide records not of the above types |
| prot.accession2taxid | NCBI | TaxID mapping for protein records |
| pdb.accession2taxid | NCBI | TaxID mapping for PDB protein records |
| dead_nucl.accession2taxid | NCBI | TaxID mapping for dead nucleotide records |
| dead_prot.accession2taxid | NCBI | TaxID mapping for dead protein records |
| dead_wgs.accession2taxid | NCBI | TaxID mapping for dead WGS or TSA records |
+
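As a sketch, a single accession can be looked up with standard tools (NP_414542 is just an example accession; if the local copy is stored gzip-compressed, use zgrep on the corresponding .gz file instead):

grep -m 1 $'^NP_414542\t' $NCBI_TAXONOMY_ROOT/accession2taxid/prot.accession2taxid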

The biocollections databases contain collection location information. coll_dump.txt is located within the /sw/data/ncbi_taxonomy/latest directory. Those marked biocollections are located within the /sw/data/ncbi_taxonomy/latest/biocollections directory.

| Name | Source | Notes |
| --- | --- | --- |
| coll_dump.txt | NCBI | top-level directory |
| Collection_codes.txt | NCBI | biocollections |
| Institution_codes.txt | NCBI | biocollections |
| Unique_institution_codes.txt | NCBI | biocollections |
diff --git a/databases/other_local/index.html b/databases/other_local/index.html

Other bioinformatics-oriented local data resources

+

Haplotype Reference Consortium

+

The Haplotype Reference Consortium VCF database is a large reference panel of human haplotypes, produced by combining sequencing data from multiple cohorts. Version r1.1 is installed on all systems as data module HaplotypeReferenceConsortium/r1.1.

GnomAD: Genome Aggregation Database

+

The Genome Aggregation Database (gnomAD) VCF database is downloaded and located in /sw/data/gnomad_data/vcf/{exomes,genomes}.

ExAC: Exome Aggregation Consortium

+

The ExAC (Exome Aggregation Consortium) database releases 0.1, 0.2, 0.3 and 0.3.1 are downloaded in their entirety and are available at /sw/data/ExAC/release{0.1,0.2,0.3,0.3.1}.

Pfam

+

The Pfam database versions 2011, 28.0, 31.0 and 35.0 are downloaded in their entirety and are available via the data modules Pfam/{2011,28.0,31.0,35.0}, which each define the environment variable PFAM_ROOT as the location of the Pfam downloads. See the appropriate module help for further information. In particular, the family-specific trees are available in $PFAM_ROOT/trees. The given directory can be used for the -dir argument to the pfam_scan.pl script provided by the pfam_scan modules, which each load the appropriate Pfam data module. Module version pfam_scan/1.5 is for Pfam/28.0, and module version pfam_scan/1.6 is for Pfam/31.0; this latter module might also work with Pfam/35.0.

+
pfam_scan.pl -dir $PFAM_ROOT ...
+
+

The pfam_scan.pl script is designed to work with the Pfam database.

dbCAN

+

The dbCAN 4.0 database for automated carbohydrate-active enzyme annotation is now available in directory /sw/data/dbCAN/4.0 on Uppmax servers. The database is formatted for use with the hmmer/3.1b1-{gcc,intel} modules. For more information see /sw/data/dbCAN/4.0/readme.txt or the remote version.

+

The local path to the script for post-processing hmmscan --domtblout output is /sw/data/dbCAN/4.0/hmmscan-parser.sh. The CAZyDB trees have also been unpacked and are available in /sw/data/dbCAN/4.0/CAZyDB-phylogeny.

Variant Effect Predictor cache files

+

Local caches of all database files available for Ensembl Variant Effect Predictor 87, 89 and 91 are available in the directories /sw/data/vep/{87,89,91}. When module version vep/89 or vep/91 is loaded, the environment variable VEP_CACHE is set to the directory for the appropriate version. Local caches for versions 82, 84 and 86 exist only for homo_sapiens. To use the cached databases, run the script using the --cache option to indicate the use of a locally-cached database, and the --dir option to specify where this is:

+
vep --cache --dir $VEP_CACHE  ...
+
+

If you are using vep/89, use:

+
variant_effect_predictor.pl --cache --dir $VEP_CACHE  ...
+
+

All plugins are also available. For more script options, see its online help page.

CDD - Position-Specific Scoring Matrices for CD-Search

+

The CDD database versions 3.14 and 3.16 are downloaded in their entirety and are available at /sw/data/cdd/{3.14,3.16}. These directories contain collections of position-specific scoring matrices (PSSMs) that have been created for the CD-Search service.

+

The PSSMs are meant to be used for compiling RPS-BLAST search databases, which can be used with the standalone RPS-BLAST programs (rpsblast and rpsblastn). These programs, as well as the makeprofiledb application needed to convert files in this directory, are part of the BLAST+ executables (available on UPPMAX as part of bioinfo-tools, e.g., module blast/2.2.31+). The makeprofiledb application is described at http://www.ncbi.nlm.nih.gov/books/NBK1763.

+
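As a sketch of that workflow (Cdd is a placeholder name for a profile database you have compiled with makeprofiledb, and proteins.fasta a placeholder query):

module load bioinfo-tools blast/2.2.31+
rpsblast -query proteins.fasta -db Cdd -evalue 0.01 -outfmt 6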

More information is available in the CDD README, either via FTP or its local copy /sw/data/cdd/README.

iGenomes - Collection of reference sequences and annotation files

+

A local copy of Illumina's iGenomes collection of commonly analyzed organisms is available at /sw/data/igenomes. In addition to the annotations provided by the collection, Bismark and STAR indexes have been added.

UK Biobank institutional data set (GENETICS)

+

The UKBB data set is available for eligible projects in the system for sensitive research SNIC-SENS Bianca. If you believe you are eligible, contact Professor Tove Fall to gain access.

diff --git a/databases/overview/index.html b/databases/overview/index.html

Overview of databases

+

Many commonly used data sources are stored locally at UPPMAX. This page provides an index to pages where they are described in more detail.

Available databases:

  • Blast databases
  • DIAMOND protein alignment databases
  • NCBI taxonomy databases
  • Other bioinformatics-oriented local data resources
  • Reference genomes
  • Simons Genome Diversity Project datasets
  • Access to Swegen

In order for you to access Swegen or 1000 Genomes, you must first send an email to datacentre@scilifelab.se and ask for access. When they approve you, they will contact UPPMAX and we will grant access to Swegen.

diff --git a/databases/reference_genomes/index.html b/databases/reference_genomes/index.html

Reference genomes

+

NOTE: The Illumina iGenomes are also available at UPPMAX, with additional indices built for Bismark and STAR. The scripts used to build the additional indices are available at the UPPMAX/bio-data GitHub repository.

+

Many next-generation sequencing applications involve alignment of the sequence reads to a reference genome. We store reference sequences in a directory that is accessible to all users of the system. The table below shows all currently available genomes.

| Reference genome | Assembly version |
| --- | --- |
| Homo sapiens | Feb. 2009 (GRCh37/hg19) |
| Pan troglodytes | Mar. 2006 (CGSC2.1/PanTro2) |
| Macaca mulatta | Jan. 2006 (RheMac2) |
| Sus scrofa | Apr. 2009 (Sscrofa9) |
| Canis familiaris | Sep. 2011 (CanFam3) |
| Mus musculus | July 2007 (NCBIM37/mm9), Jan. 2012 (GRCm38) |
| Gallus gallus | May 2006 (WASHUC2/galGal3) |
| Taeniopygia guttata | Mar. 2010 (TaeGut3.2.4) |
| Saccharomyces cerevisiae | Mar. 2010 (ScereEF2) |
| Equus caballus | Sep. 2007 (EquCab2) |
| Pichia stipitis | Picst3 |
| Rattus norvegicus | Nov. 2004 (RGSC3.4.61) |
| Schizosaccharomyces pombe | 20090701 |
+

Directory structure

+

The data files are located at /sw/data/reference and the directory structure is e.g.: Homo_sapiens/GRCh37.

+
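For example, to see what is available for one genome (a sketch; the exact set of subdirectories may vary by organism):

ls /sw/data/reference/Homo_sapiens/GRCh37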

Each directory contains several subdirectories, explained below:

+

dna_ftp.ensembl.org_ contains the original data files from the ENSEMBL ftp server, and should not be modified.

+

chromosomes contains fasta files for individual chromosomes.

+

chromosomes_rm contains the same files, masked with RepeatMasker.

+

concat contains most of the fasta files in "chromosomes" concatenated into a single fasta file. The exceptions are alternate contig files and DNA not mapped to any chromosome.

+

concat_rm contains most of the fasta files in "chromosomes_rm" concatenated into a single fasta file. The exceptions are alternate contig files and DNA not mapped to any chromosome.

+

program_files contains index files and metadata for software packages used to work with reference genomes, e.g. SAMtools and aligners such as Bowtie and BWA.

+

Requests for additional reference genomes or software data/index files should be directed to UPPMAX support.

diff --git a/databases/simons_genome/index.html b/databases/simons_genome/index.html

Simons Genome Diversity Project datasets

+

The Simons Foundation's Genome Diversity Project (SGDP) datasets are now available on UPPMAX. These are deep human genome sequence data, sampled to capture as much diversity as possible:

+

sgdp geographical distribution

+

There are currently approximately 14 TB of data, in the form of CRAM files with associated indices, plus summaries of the BAM files from which the CRAM files were derived.

+

Our current SGDP data are those aligned to human reference genome GRCh38DH, found at ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/simons_diversity_data/. The local UPPMAX directory for these data is /sw/data/SGDP/. The command used to collect the data was

+
echo "mirror data" | lftp ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/simons_diversity_data
+
+

As a result, the local UPPMAX archive is found at /sw/data/SGDP/data/. Within this directory are subdirectories for each of the populations included in the full dataset, with individual samples found within each population directory. For example,

+
rackham1: /sw/data/SGDP $ ls -l data/Greek
total 8
drwxr-s--- 3 douglas kgp 4096 Apr 29 14:03 SAMEA3302732
drwxr-s--- 3 douglas kgp 4096 Apr 29 14:03 SAMEA3302763
+
+

and one of these sample directories contains

+
rackham1: /sw/data/SGDP $ ls -l data/Greek/SAMEA3302732/alignment/
total 34529204
-rw-r----- 1 douglas kgp         635 Nov 30  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.bam.bas
-rw-r----- 1 douglas kgp 35355769475 Nov 30  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.cram
-rw-r----- 1 douglas kgp     2079029 Dec  1  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.cram.crai
+
+
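Since these are CRAM files, tools such as samtools need the GRCh38DH reference to decode the sequence data; a sketch (the reference Fasta path is a placeholder you must supply yourself):

module load bioinfo-tools samtools
samtools view -T /path/to/GRCh38DH.fa \
  data/Greek/SAMEA3302732/alignment/SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.cram | head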

To access this data, please request membership in the kgp group by emailing support@uppmax.uu.se. As for the 1000 Genomes Project, this is not to restrict access in any way, but rather to make it easier to inform UPPMAX users using the datasets of any relevant changes. Because the local copies of these datasets are hosted on UPPMAX systems, access is restricted to UPPMAX users; non-UPPMAX users will need to follow the procedures described on the SGDP website to download their own copies of the datasets.

diff --git a/databases/swegen/index.html b/databases/swegen/index.html

Access to Swegen

+

In order for you to access Swegen (or 1000 Genomes), you must first send an email to datacentre@scilifelab.se and ask for access.

+

When they approve you, they will contact UPPMAX and we will grant access to Swegen.

diff --git a/getting_started/bianca_usage_prerequisites/index.html b/getting_started/bianca_usage_prerequisites/index.html

Prerequisites for using Bianca

+

To be allowed to log in to Bianca, one needs all of these:

  • an active SNIC SENS or SIMPLER research project
  • an UPPMAX user account
  • an UPPMAX password

These prerequisites are discussed in detail below.

+

An active research project

+

One prerequisite for using Bianca is that you need to be a member of an active SNIC SENS or SIMPLER research project (these are called sens[number] or simp[number], where [number] represents a number, for example sens123456 or simp123456).

+
+Forgot your Bianca projects? +

One easy way to see your Bianca projects is to use the Bianca remote desktop login screen at https://bianca.uppmax.uu.se/.

+

The Bianca remote desktop login screen

+
+

SUPR (the 'Swedish User and Project Repository') is the website that allows one to request access to Bianca and to get an overview of the requested resources.

+
+What does the SUPR website look like? +

First SUPR page

+
+

First SUPR page

+
+

SUPR 2FA login

+
+

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

+
+
+

After logging in, the SUPR website will show all projects you are a member of, under the 'Projects' tab.

+
+What does the 'Projects' tab of the SUPR website look like? +

Example overview of SUPR projects

+
+

Example overview of SUPR projects

+
+
+

To see if a project has access to Bianca, click on the project and scroll to the 'Resources' section. In the 'Compute' subsection, there is a table. Under 'Resource' it should state 'Bianca @ UPPMAX'.

+
+What does the 'Resources' page of an example project look like? +

The 'Resources' page of an example project.

+
+

The 'Resources' page of an example project.

+
+
+

Note that the 'Accounts' tab can be useful to verify your username.

+
+How does the 'Accounts' tab help me find my username? +

An example of a SUPR 'Accounts' tab

+
+

An example of a SUPR 'Accounts' tab. The example user has username sven-sens2023598, which means his/her UPPMAX username is sven.

+
+
+

You can become a member of an active SNIC SENS project by:

+
  • requesting membership of an existing project in SUPR
  • creating a project; see the UPPMAX page on how to submit a project application here
+

An UPPMAX user account

+

Another prerequisite for using Bianca is that you must have a personal UPPMAX user account.

+

An UPPMAX password

+

Another prerequisite for using Bianca is that you need to know your UPPMAX password. If you change it, it may take up to an hour before changes are reflected in Bianca.

+

For advice on handling sensitive personal data correctly on Bianca, see our FAQ page.

diff --git a/getting_started/change_uppmax_password/index.html b/getting_started/change_uppmax_password/index.html

Change your UPPMAX password

+
+Prefer a video? +

See the YouTube video 'How to reset your UPPMAX password' at 1:53.

+
+

If you know your UPPMAX password, here is how to change it.

+
+Forgot your UPPMAX password? +

Go to How to reset your UPPMAX password.

+
+

Procedure

+

1. Log in to your favorite UPPMAX cluster

+

See How to login to an UPPMAX cluster.

+

2. Open a terminal

+

When logged in to an UPPMAX cluster, open a terminal. If you've logged in via SSH, you are already in a terminal :-)

+

3. Set your own password

+

In that terminal, type:

+
passwd
+
+

Now you will be asked for your old password, and then to set (and confirm) a new one!

+

Your new password will work immediately!

diff --git a/getting_started/first_job/index.html b/getting_started/first_job/index.html

Run your first job

+
+

This page guides you through a possible workflow

+
  • This is an example that gives you a quick start on the steps that may be required for you to do your work.
  • There are links to topics along the way, but you should be able to follow the steps anyway.
+
+
The workflow covers:

  • managing directories
  • transferring files
  • loading modules
  • writing a batch script
  • viewing your CPU hours and disk usage
+

Transferring some files

+

Graphical file manager

+
+Want more detailed information on file transfer to/from Rackham using a graphical tool? +

More detailed information on file transfer to/from Rackham using a graphical tool can be found here.

+
+
  • This is good if you want to move many files between the remote host and your local machine and cannot use wildcards.
+
+
+
+


+

One such graphical tool is FileZilla:

+

Filezilla

+

Type-along

+

TODO

+
+

Using the compute nodes
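As a preview of the batch-script step, a minimal Slurm script could look as below (a sketch; snic2023-x-xxx is a placeholder project ID):

#!/bin/bash
#SBATCH -A snic2023-x-xxx   # replace with your project ID
#SBATCH -p core
#SBATCH -n 1
#SBATCH -t 00:10:00
module load bioinfo-tools
echo "Hello from $(hostname)"

Save it as, say, my_job.sh and submit it with sbatch my_job.sh; the scheduler then runs it on a compute node.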

diff --git a/getting_started/get_inside_sunet/index.html b/getting_started/get_inside_sunet/index.html

Get inside the university networks

+

One cannot connect to all UPPMAX clusters from everywhere around the world. Instead, one needs to get inside the university networks first. This page describes how to get inside the university networks, or, to use more precise language, how to obtain a SUNET Internet Protocol ('IP') address.

+
+How do I know if I am inside the university networks? +

Go to https://bianca.uppmax.uu.se/.

+
  • If nothing happens, you are outside of the university networks
+

A user that is outside of the university network sees nothing

+
+

A user that is outside of the university network sees nothing.

+
+
  • If you see a login screen, you are inside of the university networks
+

A user that is inside of the university network sees a login screen

+
+

A user that is inside of the university network sees a login screen.

+
+
+

There are three ways to do this:

+
  • Physically move inside SUNET
  • Use a VPN (a 'virtual private network')
  • Use an HPC cluster within SUNET
+

Each of these three ways is described below.

+
flowchart TD

    subgraph sub_outside[IP outside SUNET]
      outside(Physically outside SUNET)
    end

    subgraph sub_inside[IP inside SUNET]
      physically_inside(Physically inside SUNET)
      inside_using_vpn(Inside SUNET using VPN)
      inside_using_rackham(Inside SUNET using Rackham)
    end

    %% Outside SUNET
    outside-->|Move physically|physically_inside
    outside-->|Use a VPN|inside_using_vpn
    outside-->|Login to Rackham|inside_using_rackham

    %% Inside SUNET
    physically_inside-.->inside_using_rackham
    physically_inside-.->inside_using_vpn
+

Physically move inside SUNET

+

To connect to all UPPMAX clusters, one must be inside SUNET.

+

All Swedish university buildings are within SUNET. Hence, working from a university building is a non-technical solution to get direct access to Bianca.

+

Use a virtual private network

+
+Want a video to see how to install the UU VPN? + +
+

To connect to all UPPMAX clusters, one must be inside SUNET.

+

A virtual private network (VPN) allows one to access all UPPMAX clusters indirectly: your computer connects to the VPN within SUNET, and that VPN accesses your favorite UPPMAX cluster.

+

To setup a VPN, see the UPPMAX documentation on how to setup a VPN.

+
+Want a video to see how the UU VPN is used? + +
+

Use an HPC cluster within SUNET

+

To connect to all UPPMAX clusters, one must be inside SUNET.

+

An HPC cluster within SUNET (for example, Rackham) allows one to access all other clusters: your computer connects to that cluster, after which one can reach the others.

+

However, when using this method, one can only use the console environments (i.e. no remote desktop environment).
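For example, using the documentation's example user sven and project sens123456 (placeholders for your own username and project):

ssh sven@rackham.uppmax.uu.se
# then, from Rackham's login node:
ssh sven-sens123456@bianca.uppmax.uu.se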

diff --git a/getting_started/get_started/index.html b/getting_started/get_started/index.html

Get started here

+

In order to use UPPMAX resources, you need to be a member of a project and have a user account.

+

PIs

+

Do you or members of your research group need compute and storage resources on an HPC cluster or Infrastructure-as-a-Service cloud? Learn how to apply for a project by following the link below:

+ +

Are you interested in other services, e.g. large-volume data storage? Let us know by contacting UPPMAX Support!

+

Users

+

Once you or someone in your group or collaboration has a project, you must apply for a user account by following the link below.

+ +

Have an account already? Then check out these basic user guides:

+ +

Students

+

Are you taking a university course that uses UPPMAX and need help? Ask your instructor! If they can't help, contact us through IT Support.

+

Getting started: First login to UPPMAX

+

See Log in to an UPPMAX cluster.

+

Changing your password

+

See How to change your UPPMAX password

+

Copying files from/to your UPPMAX account

+

See How to transfer files from/to your UPPMAX account

+

Where are my files? Or, what are the different file systems?

+

See files on UPPMAX

+

Modules

+

In order to run installed programs, one uses the module system.

+
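For example (a sketch; samtools stands in for any installed program):

module avail                          # list all available modules
module load bioinfo-tools samtools    # load the bioinformatics tree, then a tool
samtools --version                    # the program is now on your PATH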

How to run jobs

+

All jobs should be run using the job scheduler.

+

UPPMAX homepage

+

Please check our homepage regularly for information, news and announcements. We will announce maintenance stops and downtime there.

diff --git a/getting_started/get_uppmax_2fa/index.html b/getting_started/get_uppmax_2fa/index.html

Setting up two factor authentication for UPPMAX

+

Two factor authentication (abbreviated to '2FA') increases the security of your UPPMAX account and is mandatory in multiple contexts.

+
+Why is this important? +

See Why is 2FA important?

+
+

This page describes how to set this up.

+

Procedure

+
+Prefer a video? +

Watch the YouTube video 'Get your UPPMAX 2FA'

+
+

1. Install a 2FA app

+

Install an app to use for 2FA.

+
+Which app do you recommend? +

Any app that works for you.

+

Search for '2FA' or 'OTP' (short for 'one time password').

+

Wikipedia maintains a list of 2FA apps here.

+
+

2. Go to https://suprintegration.uppmax.uu.se/bootstrapotp/

+

In your web browser, go to https://suprintegration.uppmax.uu.se/bootstrapotp/.

+
+What does this look like? +

Here is what https://suprintegration.uppmax.uu.se/bootstrapotp/ looks like:

+

https://suprintegration.uppmax.uu.se/bootstrapotp

+
+

This will take you to the UU page to request a second factor for your UPPMAX account.

+

3. Click on 'Continue'

+

At this page, click on 'Continue' to be sent to a 'Login to SUPR' page.

+

Click on 'Continue' to be sent to a 'Login to SUPR' page.

+

4. Log in to SUPR

+

At the 'Login to SUPR' page, log in, in any way that works for you.

+
+What does this look like? +

Login to SUPR page when requesting a 2FA

+
+

In case you are not logged in already, log in to SUPR.

+

5. Press the button 'Prove My Identity to UPPMAX'

+

Acknowledge to SUPR that they may tell UPPMAX who you are, by pressing the button "Prove My Identity to UPPMAX" on the page.

+

6. Scan the QR-code with your 2FA app

+

Scan the QR-code with your 2FA app.

+
+What does that look like? +

Getting an UPPMAX 2FA QR code

+
+

More details here.

+

7. Enter the code on the webpage

+

Your application will show you a code; enter this code on the same webpage.

+

More details here.

+

8. See acknowledgement that the new two factor has been registered

+

You should see an acknowledgement that the new two factor has been registered.

+
+What does that look like? +

The final page

+
+

9. Wait for a confirmation email

+

After this procedure, it takes around 15 minutes before you can use the 2FA to log in.

+

FAQ

+
+What does the use of a 2FA app look like? +

UPPMAX 2FA set up for a fictional UPPMAX user called sven

+
+

UPPMAX 2FA set up for a fictional UPPMAX user called sven

+
+
+
+How do I know I used my new 2FA too early? +

Simple answer: when you've used your new 2FA before having received an email.

+

Another way to find out: try to use your new 2FA. You will get an 'Authentication failed' error when your new 2FA is not active yet.

+

Authentication failed

+
+
+How long does it take before my 2FA is active? +

This is a matter of minutes.

+

It takes a little while before your newly registered factor is usable, but this should be a matter of minutes, not days.

+
+
+Will I get an email when my 2FA is active? +

No.

+

There is no extra mail sent to let you know that the newly registered factor is usable, just the confirmation mail that mentions that it will be activated soon.

+
+

Troubleshooting

+

Some of the common problems we've seen include

+
  • Not having an account at UPPMAX. This is required to get the second factor for your account.
  • Using a device having its time set differently from our systems. There are services on the internet (e.g. https://time.is/) you can visit from the device you try to manage the code on that will show you if your device settings are problematic.
  • Noting the code given at first and trying to use it every time when asked for a code. The code to give will change every thirty seconds; you should give whatever code is shown at the time.
  • Expecting something else to be sent to you. You register the new second factor as part of the process. A confirmation mail is sent as well, but this is mostly to let you know in case your account details in SUPR have gone astray and someone else has registered a second factor for your account.
diff --git a/getting_started/get_uppmax_2fa_qr/index.html b/getting_started/get_uppmax_2fa_qr/index.html

Setting up the QR code for two factor authentication for UPPMAX

+

Part of setting up two factor authentication for UPPMAX is to get a QR code.

+

Getting an UPPMAX 2FA QR code

+

You need to scan this QR code to add your account to your software. Most software calls this "Add account" or similar, and will offer an option to scan a QR code using the smartphone camera or to select an area of the screen where the code is.

+

Note that this must often be done from within the app for two factor authentication.

+

If you see a string similar to

+
otpauth://totp/username@UPPMAX?secret=SOMETEXT&issuer=UPPMAX
+
+

it didn't work and you probably need to do something different (such as starting the app and selecting scan from within).

+

Once you've scanned the code, you are often allowed to change the name the software will use for the account before you add it. You can change the name if you want; changing the name does not affect the codes generated. Finish adding the account to the software.

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/get_uppmax_2fa_qr_code/index.html b/getting_started/get_uppmax_2fa_qr_code/index.html new file mode 100644 index 000000000..080eb4342 --- /dev/null +++ b/getting_started/get_uppmax_2fa_qr_code/index.html @@ -0,0 +1,3115 @@ + + + + + + + + + + + + + + + + + + + Setting up the QR code for two factor authentication for UPPMAX - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Setting up the QR code for two factor authentication for UPPMAX

+

Once you have the new account, you should get one time codes for it when you have it selected. To finish the registration at UPPMAX, you need to enter the code that is displayed in the field where it says "Code:" and submit. The codes will change over time; don't worry about this, you just need to use whatever code is current.

+
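If you want to sanity-check a code from a terminal, a generic TOTP tool can compute the same codes from the secret in the otpauth string (a sketch; oathtool is a third-party tool that may not be installed, and SOMETEXT is the placeholder secret shown on the previous page):

oathtool --totp -b SOMETEXT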

Once you have entered and submitted the current code, you should see a final page:

+

The final page

+

When you see that page, it will take a little while before the token is activated (you should also receive an e-mail about the new token).

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/img/2fa_uppmax_and_supr.png b/getting_started/img/2fa_uppmax_and_supr.png new file mode 100644 index 000000000..bd7e6c008 Binary files /dev/null and b/getting_started/img/2fa_uppmax_and_supr.png differ diff --git a/getting_started/img/Basic_linux.odp b/getting_started/img/Basic_linux.odp new file mode 100644 index 000000000..8cb5c8218 Binary files /dev/null and b/getting_started/img/Basic_linux.odp differ diff --git a/getting_started/img/Basic_linux.pdf b/getting_started/img/Basic_linux.pdf new file mode 100644 index 000000000..d4b4d7696 Binary files /dev/null and b/getting_started/img/Basic_linux.pdf differ diff --git a/getting_started/img/Bild1.png b/getting_started/img/Bild1.png new file mode 100644 index 000000000..de5c88614 Binary files /dev/null and b/getting_started/img/Bild1.png differ diff --git a/getting_started/img/IMG_5111.jpeg b/getting_started/img/IMG_5111.jpeg new file mode 100644 index 000000000..cefdc4ec4 Binary files /dev/null and b/getting_started/img/IMG_5111.jpeg differ diff --git a/getting_started/img/Intro.docx b/getting_started/img/Intro.docx new file mode 100644 index 000000000..3853ccdc6 Binary files /dev/null and b/getting_started/img/Intro.docx differ diff --git a/getting_started/img/IntroToLinux.pptx b/getting_started/img/IntroToLinux.pptx new file mode 100644 index 000000000..d18d33bab Binary files /dev/null and b/getting_started/img/IntroToLinux.pptx differ diff --git a/getting_started/img/Intro_slides.odp b/getting_started/img/Intro_slides.odp new file mode 100644 index 000000000..e7879ebab Binary files /dev/null and b/getting_started/img/Intro_slides.odp differ diff --git a/getting_started/img/Intro_slides.pdf b/getting_started/img/Intro_slides.pdf new file mode 100644 index 000000000..cb908a598 Binary files /dev/null and b/getting_started/img/Intro_slides.pdf differ diff --git "a/getting_started/img/Introduction to Linux_Bj\303\266rn.pptx" "b/getting_started/img/Introduction to Linux_Bj\303\266rn.pptx" new file mode 100644 index 000000000..4004937d5 Binary files /dev/null and "b/getting_started/img/Introduction to Linux_Bj\303\266rn.pptx" differ diff --git a/getting_started/img/Linux.odp b/getting_started/img/Linux.odp new file mode 100644 index 000000000..5c61d76a4 Binary files /dev/null and b/getting_started/img/Linux.odp differ diff --git a/getting_started/img/Linux.pdf b/getting_started/img/Linux.pdf new file mode 100644 index 000000000..2c85357ca Binary files /dev/null and b/getting_started/img/Linux.pdf differ diff --git a/getting_started/img/Mac_terminal.png b/getting_started/img/Mac_terminal.png new file mode 100644 index 000000000..11f80b0ae Binary files /dev/null and b/getting_started/img/Mac_terminal.png differ diff --git a/getting_started/img/RP4_cluster.pptx b/getting_started/img/RP4_cluster.pptx new file mode 100644 index 000000000..176a5b6eb Binary files /dev/null and b/getting_started/img/RP4_cluster.pptx differ diff --git a/getting_started/img/Shellshare.jpg b/getting_started/img/Shellshare.jpg new file mode 100644 index 000000000..a347f6012 Binary files /dev/null and b/getting_started/img/Shellshare.jpg differ diff --git a/getting_started/img/UPPMAX.odp b/getting_started/img/UPPMAX.odp new file mode 100644 index 000000000..2ee03c413 Binary files /dev/null and b/getting_started/img/UPPMAX.odp differ diff --git a/getting_started/img/UPPMAX.pdf b/getting_started/img/UPPMAX.pdf new file mode 100644 index 000000000..97d33cc13 Binary files /dev/null and 
b/getting_started/img/UPPMAX.pdf differ diff --git a/getting_started/img/accounts.jpg b/getting_started/img/accounts.jpg new file mode 100644 index 000000000..bfb8e65ff Binary files /dev/null and b/getting_started/img/accounts.jpg differ diff --git a/getting_started/img/bianca_architecture.png b/getting_started/img/bianca_architecture.png new file mode 100644 index 000000000..5309a2c95 Binary files /dev/null and b/getting_started/img/bianca_architecture.png differ diff --git a/getting_started/img/bianca_environments.png b/getting_started/img/bianca_environments.png new file mode 100644 index 000000000..01633d6cf Binary files /dev/null and b/getting_started/img/bianca_environments.png differ diff --git a/getting_started/img/bianca_environments_926_x_261.png b/getting_started/img/bianca_environments_926_x_261.png new file mode 100644 index 000000000..0efd3faf6 Binary files /dev/null and b/getting_started/img/bianca_environments_926_x_261.png differ diff --git a/getting_started/img/bianca_gui_login_1st.png b/getting_started/img/bianca_gui_login_1st.png new file mode 100644 index 000000000..c5018ca4f Binary files /dev/null and b/getting_started/img/bianca_gui_login_1st.png differ diff --git a/getting_started/img/bianca_gui_login_2nd.png b/getting_started/img/bianca_gui_login_2nd.png new file mode 100644 index 000000000..cef3f4679 Binary files /dev/null and b/getting_started/img/bianca_gui_login_2nd.png differ diff --git a/getting_started/img/bianca_remote_desktop.png b/getting_started/img/bianca_remote_desktop.png new file mode 100644 index 000000000..f558f09fd Binary files /dev/null and b/getting_started/img/bianca_remote_desktop.png differ diff --git a/getting_started/img/bianca_remote_desktop_463_x_262.png b/getting_started/img/bianca_remote_desktop_463_x_262.png new file mode 100644 index 000000000..a0d70976f Binary files /dev/null and b/getting_started/img/bianca_remote_desktop_463_x_262.png differ diff --git a/getting_started/img/bianca_remote_desktop_login_shows_sens_projects.png b/getting_started/img/bianca_remote_desktop_login_shows_sens_projects.png new file mode 100644 index 000000000..d5cf1a721 Binary files /dev/null and b/getting_started/img/bianca_remote_desktop_login_shows_sens_projects.png differ diff --git a/getting_started/img/biancaorganisation-01.png b/getting_started/img/biancaorganisation-01.png new file mode 100644 index 000000000..5309a2c95 Binary files /dev/null and b/getting_started/img/biancaorganisation-01.png differ diff --git a/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts.pptx.pdf b/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts.pptx.pdf new file mode 100644 index 000000000..db5d67c63 Binary files /dev/null and b/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts.pptx.pdf differ diff --git a/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts2022.pptx b/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts2022.pptx new file mode 100644 index 000000000..db5d67c63 Binary files /dev/null and b/getting_started/img/c_560271-l_1-k_uppmax-bash-scripts2022.pptx differ diff --git a/getting_started/img/cat.png b/getting_started/img/cat.png new file mode 100644 index 000000000..49f97b3f8 Binary files /dev/null and b/getting_started/img/cat.png differ diff --git a/getting_started/img/caution.png b/getting_started/img/caution.png new file mode 100644 index 000000000..e872b277d Binary files /dev/null and b/getting_started/img/caution.png differ diff --git a/getting_started/img/cross.png b/getting_started/img/cross.png new file mode 100644 index 
000000000..5e95f762c Binary files /dev/null and b/getting_started/img/cross.png differ diff --git a/getting_started/img/edit.png b/getting_started/img/edit.png new file mode 100644 index 000000000..73b7509b3 Binary files /dev/null and b/getting_started/img/edit.png differ diff --git a/getting_started/img/empty b/getting_started/img/empty new file mode 100644 index 000000000..89e5656e1 --- /dev/null +++ b/getting_started/img/empty @@ -0,0 +1 @@ +emptyfile diff --git a/getting_started/img/find_inst.PNG b/getting_started/img/find_inst.PNG new file mode 100644 index 000000000..ab9d5b5a0 Binary files /dev/null and b/getting_started/img/find_inst.PNG differ diff --git a/getting_started/img/finished_jobs.jpg b/getting_started/img/finished_jobs.jpg new file mode 100644 index 000000000..ce28059df Binary files /dev/null and b/getting_started/img/finished_jobs.jpg differ diff --git a/getting_started/img/flavours.png b/getting_started/img/flavours.png new file mode 100644 index 000000000..43689a705 Binary files /dev/null and b/getting_started/img/flavours.png differ diff --git a/getting_started/img/folders.png b/getting_started/img/folders.png new file mode 100644 index 000000000..2dae62572 Binary files /dev/null and b/getting_started/img/folders.png differ diff --git a/getting_started/img/fz3_osx_main.png b/getting_started/img/fz3_osx_main.png new file mode 100644 index 000000000..bf32dfbf4 Binary files /dev/null and b/getting_started/img/fz3_osx_main.png differ diff --git a/getting_started/img/get_uppmax_2fa_authentication_failed_directly_after_new_2fa.png b/getting_started/img/get_uppmax_2fa_authentication_failed_directly_after_new_2fa.png new file mode 100644 index 000000000..42e3b842a Binary files /dev/null and b/getting_started/img/get_uppmax_2fa_authentication_failed_directly_after_new_2fa.png differ diff --git a/getting_started/img/get_uppmax_2fa_qr.png b/getting_started/img/get_uppmax_2fa_qr.png new file mode 100644 index 000000000..8b880b3b2 Binary files /dev/null and b/getting_started/img/get_uppmax_2fa_qr.png differ diff --git a/getting_started/img/get_uppmax_2fa_qr_code.png b/getting_started/img/get_uppmax_2fa_qr_code.png new file mode 100644 index 000000000..7e6356608 Binary files /dev/null and b/getting_started/img/get_uppmax_2fa_qr_code.png differ diff --git a/getting_started/img/gnu.png b/getting_started/img/gnu.png new file mode 100644 index 000000000..449d85820 Binary files /dev/null and b/getting_started/img/gnu.png differ diff --git a/getting_started/img/hardware.png b/getting_started/img/hardware.png new file mode 100644 index 000000000..c4c9777ee Binary files /dev/null and b/getting_started/img/hardware.png differ diff --git a/getting_started/img/head.png b/getting_started/img/head.png new file mode 100644 index 000000000..54651d4e7 Binary files /dev/null and b/getting_started/img/head.png differ diff --git a/getting_started/img/images.jfif b/getting_started/img/images.jfif new file mode 100644 index 000000000..ecba3984c Binary files /dev/null and b/getting_started/img/images.jfif differ diff --git a/getting_started/img/login_bianca_remote_desktop_website_access_denied.png b/getting_started/img/login_bianca_remote_desktop_website_access_denied.png new file mode 100644 index 000000000..458ee5343 Binary files /dev/null and b/getting_started/img/login_bianca_remote_desktop_website_access_denied.png differ diff --git a/getting_started/img/login_bianca_remote_desktop_website_authentication_failed.png b/getting_started/img/login_bianca_remote_desktop_website_authentication_failed.png 
new file mode 100644 index 000000000..da34f5358 Binary files /dev/null and b/getting_started/img/login_bianca_remote_desktop_website_authentication_failed.png differ diff --git a/getting_started/img/login_bianca_remote_desktop_website_no_sunet.png b/getting_started/img/login_bianca_remote_desktop_website_no_sunet.png new file mode 100644 index 000000000..20b4d1aab Binary files /dev/null and b/getting_started/img/login_bianca_remote_desktop_website_no_sunet.png differ diff --git a/getting_started/img/login_bianca_remote_desktop_website_no_sunet_ubuntu.png b/getting_started/img/login_bianca_remote_desktop_website_no_sunet_ubuntu.png new file mode 100644 index 000000000..d32d162ee Binary files /dev/null and b/getting_started/img/login_bianca_remote_desktop_website_no_sunet_ubuntu.png differ diff --git a/getting_started/img/login_bianca_via_terminal_terminal.png b/getting_started/img/login_bianca_via_terminal_terminal.png new file mode 100644 index 000000000..a4b17989b Binary files /dev/null and b/getting_started/img/login_bianca_via_terminal_terminal.png differ diff --git a/getting_started/img/login_bianca_via_terminal_terminal_462_x_202.png b/getting_started/img/login_bianca_via_terminal_terminal_462_x_202.png new file mode 100644 index 000000000..282eba313 Binary files /dev/null and b/getting_started/img/login_bianca_via_terminal_terminal_462_x_202.png differ diff --git a/getting_started/img/login_rackham_via_terminal_terminal.png b/getting_started/img/login_rackham_via_terminal_terminal.png new file mode 100644 index 000000000..215a9c5fe Binary files /dev/null and b/getting_started/img/login_rackham_via_terminal_terminal.png differ diff --git a/getting_started/img/login_rackham_via_terminal_terminal_409_x_290.png b/getting_started/img/login_rackham_via_terminal_terminal_409_x_290.png new file mode 100644 index 000000000..eec83f03b Binary files /dev/null and b/getting_started/img/login_rackham_via_terminal_terminal_409_x_290.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_create_login_node.png b/getting_started/img/login_uppmax_bianca_website_create_login_node.png new file mode 100644 index 000000000..957e760b0 Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_create_login_node.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_create_login_node_ubuntu.png b/getting_started/img/login_uppmax_bianca_website_create_login_node_ubuntu.png new file mode 100644 index 000000000..fe4fc5314 Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_create_login_node_ubuntu.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser.png new file mode 100644 index 000000000..d2baaf57d Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser_short.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser_short.png new file mode 100644 index 000000000..126d42ebf Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_outside_sunet_browser_short.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu.png new file mode 100644 index 000000000..284880cd3 Binary files /dev/null and 
b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser.png new file mode 100644 index 000000000..4e08ec6c8 Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser_short.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser_short.png new file mode 100644 index 000000000..7cb8f4df3 Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_outside_sunet_timeout_ubuntu_browser_short.png differ diff --git a/getting_started/img/login_uppmax_bianca_website_outside_sunet_ubuntu.png b/getting_started/img/login_uppmax_bianca_website_outside_sunet_ubuntu.png new file mode 100644 index 000000000..5f37f6646 Binary files /dev/null and b/getting_started/img/login_uppmax_bianca_website_outside_sunet_ubuntu.png differ diff --git a/getting_started/img/mobax.jpg b/getting_started/img/mobax.jpg new file mode 100644 index 000000000..80101aa4e Binary files /dev/null and b/getting_started/img/mobax.jpg differ diff --git a/getting_started/img/mobax_start.jpg b/getting_started/img/mobax_start.jpg new file mode 100644 index 000000000..a92fbb535 Binary files /dev/null and b/getting_started/img/mobax_start.jpg differ diff --git a/getting_started/img/mobax_start1.jpg b/getting_started/img/mobax_start1.jpg new file mode 100644 index 000000000..7caf635b0 Binary files /dev/null and b/getting_started/img/mobax_start1.jpg differ diff --git a/getting_started/img/mv_inbox.png b/getting_started/img/mv_inbox.png new file mode 100644 index 000000000..3ff6b3d73 Binary files /dev/null and b/getting_started/img/mv_inbox.png differ diff --git a/getting_started/img/node.png b/getting_started/img/node.png new file mode 100644 index 000000000..235b4ed9f Binary files /dev/null and b/getting_started/img/node.png differ diff --git a/getting_started/img/nodes.png b/getting_started/img/nodes.png new file mode 100644 index 000000000..073dd2aec Binary files /dev/null and b/getting_started/img/nodes.png differ diff --git a/getting_started/img/pdc_login_portal_with_username_and_key.png b/getting_started/img/pdc_login_portal_with_username_and_key.png new file mode 100644 index 000000000..ea62e752d Binary files /dev/null and b/getting_started/img/pdc_login_portal_with_username_and_key.png differ diff --git a/getting_started/img/permission.png b/getting_started/img/permission.png new file mode 100644 index 000000000..bae5ee5c8 Binary files /dev/null and b/getting_started/img/permission.png differ diff --git a/getting_started/img/pingvin.png b/getting_started/img/pingvin.png new file mode 100644 index 000000000..b2900c2c2 Binary files /dev/null and b/getting_started/img/pingvin.png differ diff --git a/getting_started/img/program_flags.png b/getting_started/img/program_flags.png new file mode 100644 index 000000000..573d635fa Binary files /dev/null and b/getting_started/img/program_flags.png differ diff --git a/getting_started/img/proj.PNG b/getting_started/img/proj.PNG new file mode 100644 index 000000000..4bed0e4c6 Binary files /dev/null and b/getting_started/img/proj.PNG differ diff --git a/getting_started/img/proj_red.PNG b/getting_started/img/proj_red.PNG new file mode 100644 index 000000000..c800f4110 
Binary files /dev/null and b/getting_started/img/proj_red.PNG differ diff --git a/getting_started/img/proj_request.PNG b/getting_started/img/proj_request.PNG new file mode 100644 index 000000000..70f199955 Binary files /dev/null and b/getting_started/img/proj_request.PNG differ diff --git a/getting_started/img/queue1.png b/getting_started/img/queue1.png new file mode 100644 index 000000000..259e4bb03 Binary files /dev/null and b/getting_started/img/queue1.png differ diff --git a/getting_started/img/queue2.png b/getting_started/img/queue2.png new file mode 100644 index 000000000..58372444d Binary files /dev/null and b/getting_started/img/queue2.png differ diff --git a/getting_started/img/queue3.png b/getting_started/img/queue3.png new file mode 100644 index 000000000..055b64c0a Binary files /dev/null and b/getting_started/img/queue3.png differ diff --git a/getting_started/img/rackham_gui_uppmax_uu_se_empty.png b/getting_started/img/rackham_gui_uppmax_uu_se_empty.png new file mode 100644 index 000000000..99b6a45e1 Binary files /dev/null and b/getting_started/img/rackham_gui_uppmax_uu_se_empty.png differ diff --git a/getting_started/img/rackham_gui_uppmax_uu_se_empty_dialog_only.png b/getting_started/img/rackham_gui_uppmax_uu_se_empty_dialog_only.png new file mode 100644 index 000000000..ab927d795 Binary files /dev/null and b/getting_started/img/rackham_gui_uppmax_uu_se_empty_dialog_only.png differ diff --git a/getting_started/img/rackham_gui_uppmax_uu_se_empty_on_ubuntu.png b/getting_started/img/rackham_gui_uppmax_uu_se_empty_on_ubuntu.png new file mode 100644 index 000000000..ddf33198f Binary files /dev/null and b/getting_started/img/rackham_gui_uppmax_uu_se_empty_on_ubuntu.png differ diff --git a/getting_started/img/rackham_remote_desktop_via_website.png b/getting_started/img/rackham_remote_desktop_via_website.png new file mode 100644 index 000000000..9eaba1e61 Binary files /dev/null and b/getting_started/img/rackham_remote_desktop_via_website.png differ diff --git a/getting_started/img/rackham_remote_desktop_via_website_480_x_270.png b/getting_started/img/rackham_remote_desktop_via_website_480_x_270.png new file mode 100644 index 000000000..9d1c0566e Binary files /dev/null and b/getting_started/img/rackham_remote_desktop_via_website_480_x_270.png differ diff --git a/getting_started/img/rackham_via_remote_desktop.jpg b/getting_started/img/rackham_via_remote_desktop.jpg new file mode 100644 index 000000000..71fef5394 Binary files /dev/null and b/getting_started/img/rackham_via_remote_desktop.jpg differ diff --git a/getting_started/img/rackham_via_remote_desktop_50.jpg b/getting_started/img/rackham_via_remote_desktop_50.jpg new file mode 100644 index 000000000..42488f1f6 Binary files /dev/null and b/getting_started/img/rackham_via_remote_desktop_50.jpg differ diff --git a/getting_started/img/reg_form.PNG b/getting_started/img/reg_form.PNG new file mode 100644 index 000000000..6bdaa3c83 Binary files /dev/null and b/getting_started/img/reg_form.PNG differ diff --git a/getting_started/img/reg_new_person.PNG b/getting_started/img/reg_new_person.PNG new file mode 100644 index 000000000..1357a5858 Binary files /dev/null and b/getting_started/img/reg_new_person.PNG differ diff --git a/getting_started/img/remote_desktop_thinlinc_profile_chooser.png b/getting_started/img/remote_desktop_thinlinc_profile_chooser.png new file mode 100644 index 000000000..f8d2fd66d Binary files /dev/null and b/getting_started/img/remote_desktop_thinlinc_profile_chooser.png differ diff --git 
a/getting_started/img/remote_desktop_thinlinc_profile_chooser_xfce.png b/getting_started/img/remote_desktop_thinlinc_profile_chooser_xfce.png new file mode 100644 index 000000000..b21bcc173 Binary files /dev/null and b/getting_started/img/remote_desktop_thinlinc_profile_chooser_xfce.png differ diff --git a/getting_started/img/request_account.jpg b/getting_started/img/request_account.jpg new file mode 100644 index 000000000..b70bf3b62 Binary files /dev/null and b/getting_started/img/request_account.jpg differ diff --git a/getting_started/img/screen.png b/getting_started/img/screen.png new file mode 100644 index 000000000..1a93074b0 Binary files /dev/null and b/getting_started/img/screen.png differ diff --git a/getting_started/img/setup_vpn_uu_linux_1.png b/getting_started/img/setup_vpn_uu_linux_1.png new file mode 100644 index 000000000..c5ede225b Binary files /dev/null and b/getting_started/img/setup_vpn_uu_linux_1.png differ diff --git a/getting_started/img/setup_vpn_uu_linux_2.png b/getting_started/img/setup_vpn_uu_linux_2.png new file mode 100644 index 000000000..9d4434b67 Binary files /dev/null and b/getting_started/img/setup_vpn_uu_linux_2.png differ diff --git a/getting_started/img/setup_vpn_uu_linux_3.png b/getting_started/img/setup_vpn_uu_linux_3.png new file mode 100644 index 000000000..1af8bc9b1 Binary files /dev/null and b/getting_started/img/setup_vpn_uu_linux_3.png differ diff --git a/getting_started/img/shell.jpg b/getting_started/img/shell.jpg new file mode 100644 index 000000000..bf38e501b Binary files /dev/null and b/getting_started/img/shell.jpg differ diff --git a/getting_started/img/supr_2fa.png b/getting_started/img/supr_2fa.png new file mode 100644 index 000000000..ff1320845 Binary files /dev/null and b/getting_started/img/supr_2fa.png differ diff --git a/getting_started/img/supr_accounts.png b/getting_started/img/supr_accounts.png new file mode 100644 index 000000000..12b70cb60 Binary files /dev/null and b/getting_started/img/supr_accounts.png differ diff --git a/getting_started/img/supr_add_resource_pelle_add.png b/getting_started/img/supr_add_resource_pelle_add.png new file mode 100644 index 000000000..7668503b6 Binary files /dev/null and b/getting_started/img/supr_add_resource_pelle_add.png differ diff --git a/getting_started/img/supr_centre_local_compute_rounds_click_go_to_uppmax_local.png b/getting_started/img/supr_centre_local_compute_rounds_click_go_to_uppmax_local.png new file mode 100644 index 000000000..ab8cc09be Binary files /dev/null and b/getting_started/img/supr_centre_local_compute_rounds_click_go_to_uppmax_local.png differ diff --git a/getting_started/img/supr_compute_rounds_click_go_to_centre_local_compute.png b/getting_started/img/supr_compute_rounds_click_go_to_centre_local_compute.png new file mode 100644 index 000000000..4afe31a65 Binary files /dev/null and b/getting_started/img/supr_compute_rounds_click_go_to_centre_local_compute.png differ diff --git a/getting_started/img/supr_create_new_proposal_for_uppmax_local_click_create_new.png b/getting_started/img/supr_create_new_proposal_for_uppmax_local_click_create_new.png new file mode 100644 index 000000000..c5624f145 Binary files /dev/null and b/getting_started/img/supr_create_new_proposal_for_uppmax_local_click_create_new.png differ diff --git a/getting_started/img/supr_first_page.png b/getting_started/img/supr_first_page.png new file mode 100644 index 000000000..4972f2ae6 Binary files /dev/null and b/getting_started/img/supr_first_page.png differ diff --git a/getting_started/img/supr_login.PNG 
b/getting_started/img/supr_login.PNG new file mode 100644 index 000000000..d22577734 Binary files /dev/null and b/getting_started/img/supr_login.PNG differ diff --git a/getting_started/img/supr_login_for_2fa.png b/getting_started/img/supr_login_for_2fa.png new file mode 100644 index 000000000..0c99cfb18 Binary files /dev/null and b/getting_started/img/supr_login_for_2fa.png differ diff --git a/getting_started/img/supr_naiss_add_resource_click_cloud.png b/getting_started/img/supr_naiss_add_resource_click_cloud.png new file mode 100644 index 000000000..0ce917ea2 Binary files /dev/null and b/getting_started/img/supr_naiss_add_resource_click_cloud.png differ diff --git a/getting_started/img/supr_naiss_add_resource_cloud.png b/getting_started/img/supr_naiss_add_resource_cloud.png new file mode 100644 index 000000000..4238855e8 Binary files /dev/null and b/getting_started/img/supr_naiss_add_resource_cloud.png differ diff --git a/getting_started/img/supr_naiss_added_resource_cloud.png b/getting_started/img/supr_naiss_added_resource_cloud.png new file mode 100644 index 000000000..490f68746 Binary files /dev/null and b/getting_started/img/supr_naiss_added_resource_cloud.png differ diff --git a/getting_started/img/supr_naiss_click_submit.png b/getting_started/img/supr_naiss_click_submit.png new file mode 100644 index 000000000..a094cde4c Binary files /dev/null and b/getting_started/img/supr_naiss_click_submit.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_click_create.png b/getting_started/img/supr_naiss_create_new_proposal_click_create.png new file mode 100644 index 000000000..a9c001942 Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_click_create.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small.png b/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small.png new file mode 100644 index 000000000..815511121 Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small_resources.png b/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small_resources.png new file mode 100644 index 000000000..5175a643d Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_for_naiss_sens_small_resources.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_for_simpler.png b/getting_started/img/supr_naiss_create_new_proposal_for_simpler.png new file mode 100644 index 000000000..ada12826a Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_for_simpler.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_for_simpler_ubuntu.png b/getting_started/img/supr_naiss_create_new_proposal_for_simpler_ubuntu.png new file mode 100644 index 000000000..195be6a6d Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_for_simpler_ubuntu.png differ diff --git a/getting_started/img/supr_naiss_create_new_proposal_for_simpler_uncropped.png b/getting_started/img/supr_naiss_create_new_proposal_for_simpler_uncropped.png new file mode 100644 index 000000000..c7c483427 Binary files /dev/null and b/getting_started/img/supr_naiss_create_new_proposal_for_simpler_uncropped.png differ diff --git a/getting_started/img/supr_naiss_naiss_sens_rounds_click_go_to_small.png b/getting_started/img/supr_naiss_naiss_sens_rounds_click_go_to_small.png new file mode 100644 
index 000000000..bcf4e4e83 Binary files /dev/null and b/getting_started/img/supr_naiss_naiss_sens_rounds_click_go_to_small.png differ diff --git a/getting_started/img/supr_naiss_no_resources.png b/getting_started/img/supr_naiss_no_resources.png new file mode 100644 index 000000000..9985ac118 Binary files /dev/null and b/getting_started/img/supr_naiss_no_resources.png differ diff --git a/getting_started/img/supr_naiss_open_for_proposals_click_create_new_sens.png b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_sens.png new file mode 100644 index 000000000..8d7e8b959 Binary files /dev/null and b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_sens.png differ diff --git a/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler.png b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler.png new file mode 100644 index 000000000..3119e5e0b Binary files /dev/null and b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler.png differ diff --git a/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_ubuntu.png b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_ubuntu.png new file mode 100644 index 000000000..e9abe4aab Binary files /dev/null and b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_ubuntu.png differ diff --git a/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_uncropped.png b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_uncropped.png new file mode 100644 index 000000000..e947fd8f9 Binary files /dev/null and b/getting_started/img/supr_naiss_open_for_proposals_click_create_new_simpler_uncropped.png differ diff --git a/getting_started/img/supr_naiss_projects_active_projects_and_request_1.png b/getting_started/img/supr_naiss_projects_active_projects_and_request_1.png new file mode 100644 index 000000000..cbcb9d49d Binary files /dev/null and b/getting_started/img/supr_naiss_projects_active_projects_and_request_1.png differ diff --git a/getting_started/img/supr_naiss_projects_active_projects_and_request_2.png b/getting_started/img/supr_naiss_projects_active_projects_and_request_2.png new file mode 100644 index 000000000..7ab715325 Binary files /dev/null and b/getting_started/img/supr_naiss_projects_active_projects_and_request_2.png differ diff --git a/getting_started/img/supr_naiss_request_membership_in_project.png b/getting_started/img/supr_naiss_request_membership_in_project.png new file mode 100644 index 000000000..0a13a61e1 Binary files /dev/null and b/getting_started/img/supr_naiss_request_membership_in_project.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_compute_rounds.png b/getting_started/img/supr_naiss_rounds_click_go_to_compute_rounds.png new file mode 100644 index 000000000..83dcca894 Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_go_to_compute_rounds.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_naiss_sens.png b/getting_started/img/supr_naiss_rounds_click_go_to_naiss_sens.png new file mode 100644 index 000000000..ce44c15bb Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_go_to_naiss_sens.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_scc.png b/getting_started/img/supr_naiss_rounds_click_go_to_scc.png new file mode 100644 index 000000000..76e7ce443 Binary files /dev/null and 
b/getting_started/img/supr_naiss_rounds_click_go_to_scc.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_simpler.png b/getting_started/img/supr_naiss_rounds_click_go_to_simpler.png new file mode 100644 index 000000000..da9b4df99 Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_go_to_simpler.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_simpler_ubuntu.png b/getting_started/img/supr_naiss_rounds_click_go_to_simpler_ubuntu.png new file mode 100644 index 000000000..f303bc48f Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_go_to_simpler_ubuntu.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_go_to_simpler_uncropped.png b/getting_started/img/supr_naiss_rounds_click_go_to_simpler_uncropped.png new file mode 100644 index 000000000..32e96f0a6 Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_go_to_simpler_uncropped.png differ diff --git a/getting_started/img/supr_naiss_rounds_click_small_compute.png b/getting_started/img/supr_naiss_rounds_click_small_compute.png new file mode 100644 index 000000000..b5766e8fd Binary files /dev/null and b/getting_started/img/supr_naiss_rounds_click_small_compute.png differ diff --git a/getting_started/img/supr_naiss_start_click_projects.png b/getting_started/img/supr_naiss_start_click_projects.png new file mode 100644 index 000000000..ae7f4e853 Binary files /dev/null and b/getting_started/img/supr_naiss_start_click_projects.png differ diff --git a/getting_started/img/supr_naiss_start_click_rounds.png b/getting_started/img/supr_naiss_start_click_rounds.png new file mode 100644 index 000000000..b8c97ea58 Binary files /dev/null and b/getting_started/img/supr_naiss_start_click_rounds.png differ diff --git a/getting_started/img/supr_project_naiss2024-22-49_resources.png b/getting_started/img/supr_project_naiss2024-22-49_resources.png new file mode 100644 index 000000000..8ca1b5b75 Binary files /dev/null and b/getting_started/img/supr_project_naiss2024-22-49_resources.png differ diff --git a/getting_started/img/supr_project_sens2023598_resources.png b/getting_started/img/supr_project_sens2023598_resources.png new file mode 100644 index 000000000..b5ba76b75 Binary files /dev/null and b/getting_started/img/supr_project_sens2023598_resources.png differ diff --git a/getting_started/img/supr_projects.png b/getting_started/img/supr_projects.png new file mode 100644 index 000000000..a562b9c75 Binary files /dev/null and b/getting_started/img/supr_projects.png differ diff --git a/getting_started/img/supr_request_2fa.png b/getting_started/img/supr_request_2fa.png new file mode 100644 index 000000000..842b11646 Binary files /dev/null and b/getting_started/img/supr_request_2fa.png differ diff --git a/getting_started/img/supr_resources_scc_click_small.png b/getting_started/img/supr_resources_scc_click_small.png new file mode 100644 index 000000000..491be2c59 Binary files /dev/null and b/getting_started/img/supr_resources_scc_click_small.png differ diff --git a/getting_started/img/supr_uppmax_local_click_create_new_proposal.png b/getting_started/img/supr_uppmax_local_click_create_new_proposal.png new file mode 100644 index 000000000..3b4ebc76d Binary files /dev/null and b/getting_started/img/supr_uppmax_local_click_create_new_proposal.png differ diff --git a/getting_started/img/supr_uppmax_local_project_add_pelle.png b/getting_started/img/supr_uppmax_local_project_add_pelle.png new file mode 100644 index 000000000..ab41e2aaf Binary files 
/dev/null and b/getting_started/img/supr_uppmax_local_project_add_pelle.png differ diff --git a/getting_started/img/supr_uppmax_local_project_add_pelle_add.png b/getting_started/img/supr_uppmax_local_project_add_pelle_add.png new file mode 100644 index 000000000..700c3d4aa Binary files /dev/null and b/getting_started/img/supr_uppmax_local_project_add_pelle_add.png differ diff --git a/getting_started/img/supr_uppmax_local_project_added_pelle.png b/getting_started/img/supr_uppmax_local_project_added_pelle.png new file mode 100644 index 000000000..62c3f098c Binary files /dev/null and b/getting_started/img/supr_uppmax_local_project_added_pelle.png differ diff --git a/getting_started/img/supr_uppmax_local_project_start.png b/getting_started/img/supr_uppmax_local_project_start.png new file mode 100644 index 000000000..02a3ccca6 Binary files /dev/null and b/getting_started/img/supr_uppmax_local_project_start.png differ diff --git a/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp.png b/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp.png new file mode 100644 index 000000000..3c9ed7c9e Binary files /dev/null and b/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp.png differ diff --git a/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp_ubuntu.png b/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp_ubuntu.png new file mode 100644 index 000000000..281307ffa Binary files /dev/null and b/getting_started/img/suprintegration_uppmax_uu_se_bootstrapotp_ubuntu.png differ diff --git a/getting_started/img/tab.png b/getting_started/img/tab.png new file mode 100644 index 000000000..60c59ecff Binary files /dev/null and b/getting_started/img/tab.png differ diff --git a/getting_started/img/tail.png b/getting_started/img/tail.png new file mode 100644 index 000000000..792796d2d Binary files /dev/null and b/getting_started/img/tail.png differ diff --git a/getting_started/img/terminal.png b/getting_started/img/terminal.png new file mode 100644 index 000000000..5306fd823 Binary files /dev/null and b/getting_started/img/terminal.png differ diff --git a/getting_started/img/unix_architecture.jpg b/getting_started/img/unix_architecture.jpg new file mode 100644 index 000000000..6c94d6db8 Binary files /dev/null and b/getting_started/img/unix_architecture.jpg differ diff --git a/getting_started/img/uppmax-light2.jpg b/getting_started/img/uppmax-light2.jpg new file mode 100644 index 000000000..88bfc4788 Binary files /dev/null and b/getting_started/img/uppmax-light2.jpg differ diff --git a/getting_started/img/uppmax_2fa.png b/getting_started/img/uppmax_2fa.png new file mode 100644 index 000000000..32db9307b Binary files /dev/null and b/getting_started/img/uppmax_2fa.png differ diff --git a/getting_started/img/uppmax_dark1.png b/getting_started/img/uppmax_dark1.png new file mode 100644 index 000000000..0e7dc2528 Binary files /dev/null and b/getting_started/img/uppmax_dark1.png differ diff --git a/getting_started/img/uppmax_dark2.jpg b/getting_started/img/uppmax_dark2.jpg new file mode 100644 index 000000000..a10bda861 Binary files /dev/null and b/getting_started/img/uppmax_dark2.jpg differ diff --git a/getting_started/img/uppmax_light.jpg b/getting_started/img/uppmax_light.jpg new file mode 100644 index 000000000..d42297236 Binary files /dev/null and b/getting_started/img/uppmax_light.jpg differ diff --git a/getting_started/img/usr_agree.PNG b/getting_started/img/usr_agree.PNG new file mode 100644 index 000000000..4488865f4 Binary files /dev/null and 
b/getting_started/img/usr_agree.PNG differ diff --git a/getting_started/img/wildcards_bear.png b/getting_started/img/wildcards_bear.png new file mode 100644 index 000000000..6d0ed1216 Binary files /dev/null and b/getting_started/img/wildcards_bear.png differ diff --git a/getting_started/join_existing_project/index.html b/getting_started/join_existing_project/index.html new file mode 100644 index 000000000..d5143555d --- /dev/null +++ b/getting_started/join_existing_project/index.html @@ -0,0 +1,3159 @@ + + + + + + + + + + + + + + + + + + + Join an existing project - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Join an existing project

+

To use an UPPMAX cluster, one needs to apply to a project. +This page describes how to join an existing project.

+

Procedure

+

1. Go to https://supr.naiss.se/

+

Example SUPR NAISS main page

+
+

Example SUPR NAISS main page

+
+

2. Click on 'Projects'

+

On the main page, click on 'Projects'

+

On the main page, click on 'Projects'

+
+

On the main page, click on 'Projects'

+
+

3. Scroll to 'Request Membership in Project'

+

At the 'Projects' page, scroll down to 'Request Membership in Project'.

+

At the 'Projects' page, scroll down to 'Request Membership in Project'

+
+

At the 'Projects' page, scroll down to 'Request Membership in Project'

+
+

This is the 'Request Membership in Project' section:

+

At the 'Projects' page, here is the 'Request Membership in Project'

+
+

At the 'Projects' page, here is the 'Request Membership in Project'

+
+

4. Search for a project

+

At 'Request Membership in Project' in the 'Projects' page, enter a search term and click 'Search for project'

+

At 'Request Membership in Project' in the 'Projects' page, enter a search term and click 'Search for project'

+
+

At 'Request Membership in Project' in the 'Projects' page, enter a search term and click 'Search for project'. +In this example, the search term is 'DNA'

+
+

5. Request membership of a project

+

At the 'Request Membership in Project', click on 'Request' for the project you want to request membership of.

+

At the 'Request Membership in Project', click on 'Request' for the project you want to request membership of.

+
+

At the 'Request Membership in Project', click on 'Request' for the project you want to request membership of.

+
+

6. Wait for email

+

After your request, the PI of the project will receive an email +and will accept or reject your proposal.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/linux/index.html b/getting_started/linux/index.html new file mode 100644 index 000000000..aa16a7a51 --- /dev/null +++ b/getting_started/linux/index.html @@ -0,0 +1,3517 @@ + + + + + + + + + + + + + + + + + + + + + + + Working in Linux - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Linux

+
    +
  • The "operating system" of the UPPMAX and most of the other clusters is Linux.
  • +
+
+

Questions

+
    +
  • What is Linux?
  • +
  • How to use the command line?
  • +
+
+
+

Objectives

+
    +
  • We'll briefly get an overview of Linux
  • +
  • How the command line works
  • +
  • Some text editors
  • +
  • Things to be aware of
  • +
+
+
+Want a video? +

Here is video +that gives an introduction to Linux

+
+

What is Linux?

+

Tux

+
    +
  • In everyday terms: the Linux operating system is a UNIX-like and UNIX-compatible operating system.
  • +
  • Linux is a "Kernel" on which many different programs can run.
  • +
  • The shell (bash, sh, ksh, csh, tcsh and many more) is one such program.
  • +
+

Content

+
    +
  • Actually, for it to be a full OS, the kernel is supplied with GNU software and other additions, giving us the name GNU/Linux. +
  • +
+

Content

+
    +
  • Linux is a multiuser platform at its base, which means permissions and security come naturally.
  • +
+

Linux comes in different distributions, dialects or, say, flavours

+
    +
  • UPPMAX runs CentOS and Red Hat
  • +
+

Content

+
+

Local Linux environment

+
    +
  • You may sometimes benefit from having a local Linux environment.
  • +
  • Examples:
      +
    • Mimic cluster environment to work with your local files and data as on the Cluster
    • +
    • get used to Linux (!)
    • +
    +
  • +
  • macOS is UNIX-based and very Linux-like
  • +
  • Windows requires WSL (Windows Subsystem for Linux)
  • +
+
+
+For Windows users who want to get started with WSL (not covered here) + +
+

Using the command line

+

Below, usage of the command line is discussed in text. +If you prefer video, here +is how to use the command line on the UPPMAX Bianca cluster.

+

Command line with bash (Bourne Again Shell)

+
    +
  • A Unix shell and command language.
  • +
  • Often default shell
  • +
+

Content

+
    +
  • The command-line interface: the bash prompt $
  • +
  • bash can be seen as a program that finds and runs other programs
  • +
  • bash is a scripting language that is referred to as a shell
      +
    • (because it sits around the kernel making it easy to interact with)
    • +
    +
  • +
+

Content

+

The prompt

+
[info]$ word1 word2 word3 [...]
+
+
    +
  • +

    [info] is configurable, and usually tells you who you are, on what system, and where in the file system.

    +
      +
    • Example:
    • +
    +
    [bjornc@rackham3 linux_tutorial]$
    +
    + +
  • +
+

folders

+

Example bash command

+

mv inbox

+

program flags

+
    +
  • Terminal screen shows
  • +
+

Content

+

Tab Completion

+

Content

+
    +
  • +

    Whenever you’re writing a path or filename on the bash prompt, you can strike the ‘tab’ key to +ask Bash to complete what you’re writing.

    +
  • +
  • +

    Get in the habit of this — it will save you many hours!

    +
  • +
+
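
A sketch of tab completion in action (the directory name here is hypothetical):

$ cd lin<tab>
+
+$ cd linux_tutorial/        # what Bash expands the line to
+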

Editing files

+

Edit

+

To edit files, you will use a text editor. +The UPPMAX HPC clusters have multiple text editors installed, +which are described at the UPPMAX 'Text editors' page here.

+
+

Example

+

Start nano and save a file called first.txt

+
$ nano first.txt
+
+
    +
  • Type test text
  • +
  • End and save with <ctrl>-X followed by Y and <enter>.
  • +
+
+

Typical sources of error

+

Content

+
+

Warning

+
    +
  • Capitalization matters in file names and program names
  • +
  • Spaces matter.
      +
    • Always have a space after the program name.
    • +
    • Don’t add spaces within file names.
    • +
    +
  • +
  • Check that you are in the right place in the file system.
  • +
  • File permissions. Check that the right read, write and execute permission are set. See next session.
  • +
+
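
A minimal sketch of the first two pitfalls (the file names are hypothetical):

$ cat results.txt      # works
+
+$ cat Results.txt      # fails: Results.txt is a different (here: non-existing) file
+
+$ cat results .txt     # fails: the space splits this into two names, 'results' and '.txt'
+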
+

Caution

+

Content

+
+

Warning

+
    +
  • There is no undo for:
      +
    • copy (cp),
    • +
    • move (mv), and
    • +
    • remove (rm).
    • +
    +
  • +
  • Beware of overwriting files and deleting the wrong ones.
  • +
+
+
+

Tip

+
    +
  • +

    Tip: make "rm" ask if you really want to erase:

    +
      +
    • +

      Within a session: type in the command prompt

      +

      alias rm='rm -i'

      +
    • +
    • +

      Override asking with

      +

      rm -f <>

      +
    • +
    • +

      Edit the file .bashrc in your home directory by adding the alias line, for this to apply every time you start a session.

      +
    • +
    +
  • +
  • +

    This will also work for mv and cp!

    +
  • +
+
+
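
As a sketch, these are the lines one could add to .bashrc to make rm, mv and cp ask before erasing or overwriting:

alias rm='rm -i'
+
+alias mv='mv -i'
+
+alias cp='cp -i'
+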
+

Note

+
    +
  • If you do destroy your data, email UPPMAX support; we may be able to help.
  • +
+
+
+

Keypoints

+
    +
  • The Linux operating system is a UNIX-like and UNIX-compatible operating system.
  • +
  • Typical command: + $ program word1 word2 word3 […]
  • +
  • Use text editors to edit files
  • +
  • Tips
      +
    • use Tab completion
    • +
    • capitalization and spaces matter
    • +
    • no undo for copying, moving and removing
        +
      • Solution: alias rm='rm -i'
      • +
      +
    • +
    +
  • +
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/linux_basics/index.html b/getting_started/linux_basics/index.html new file mode 100644 index 000000000..e82416635 --- /dev/null +++ b/getting_started/linux_basics/index.html @@ -0,0 +1,4023 @@ + + + + + + + + + + + + + + + + + + + + + + + Basic Linux commands - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Basic toolkit

+

Caption

+
+

Objectives

+
    +
  • Let's dig into the most important Bash commands
  • +
  • We'll do a type-along session
  • +
+
+
+Like videos? +

Below, usage of the command line is discussed in text. +If you prefer video, here +is how to use the command line on the UPPMAX Bianca cluster.

+
+

We will cover these commands

+ +
    +
  1. pwd   print working directory
  2. +
  3. ls  list content
  4. +
  5. cd  change directory
  6. +
  7. mkdir  make directory
  8. +
  9. cp  copy
  10. +
  11. scp  securely remotely copy
  12. +
  13. mv  move
  14. +
  15. rm  remove
  16. +
  17. rmdir  remove empty directory
  18. +
+

Read files and change file properties

+
    +
  1. cat  print content on screen
  2. +
  3. head  print first part
  4. +
  5. tail  print last part
  6. +
  7. less  browse content
  8. +
  9. tar  compress or extract file
  10. +
  11. chmod  change file permissions
  12. +
  13. man  info about a command
  14. +
+

File system Navigation

+

pwd — where are you now? "Print name of current/working directory"

+
pwd
+
+pwd -P
+
+
    +
  • -P gives you the physical path,
      +
    • ignores how you got there
    • +
    +
  • +
+
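
A sketch of the difference (the paths are made up; project directories on the clusters are often reached via symbolic links):

$ pwd
+
+/proj/introtouppmax          # the path as you typed it
+
+$ pwd -P
+
+/crex/proj/introtouppmax     # the physical path, with symbolic links resolved
+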

ls — list directory contents

+

Type ls to display the contents of the current directory.

+
ls -a
+
+

-a also shows hidden files and directories.

+
ls -l
+
+

-l gives you a long listing with detailed information.

+
ls -lt
+
+

-lt sorts things by time modified.

+
ls -lrt
+
+

-r gives reversed order, so in this case the newest is on the last line.

+
man ls
+
+
    +
  • for complete information about a command.
  • +
  • TIP: $ man <command> works for almost any command!
      +
    • scroll with arrows and quit with q.
    • +
    +
  • +
+

cd — Change the shell working Directory

+
    +
  • To change directory, use cd <target>
  • +
+
+

Warning

+
    +
  • Some of the following steps will only be available to the Introduction course members.
  • +
  • These involve the `/proj/introtouppmax` directory
  • +
+
+
cd /proj/introtouppmax
+
+pwd
+
+ls
+
+cd labs
+
+pwd
+
+
+

Challenge

+
    +
  • Experiment with cd
  • +
  • Try adding <spaces> or extra / in various places
  • +
  • +

    Use tab completion to avoid typos and typing ls a lot

    +
  • +
  • +

    Figure out the use of the following:

    +
  • +
+
$ cd -
+
+$ cd ..
+
+$ cd
+
+$ cd ~
+
+
+Solution +
    +
  • +

    cd - : goes back to your last directory

    +
  • +
  • +

    cd .. : goes a level up in the hierarchy

    +
  • +
  • +

    cd : goes to home directory

    +
  • +
  • +

    cd ~ : also goes to home directory

    +
  • +
+
+
+

Copy, Create, Move

+

mkdir — make directories

+
+

Warning

+
    +
  • Make sure you’re in your home directory by cd ~
  • +
+
+
    +
  • Create a new directory uppmax-intro
  • +
+
cd ~
+mkdir uppmax-intro
+
+
    +
  • Go in there:
  • +
+
cd uppmax-intro/
+
+

cp — copy files and directories

+
    +
  • Copy files with: cp <source> <target>
  • +
  • Set the target to . to keep the name and place the copy in the present directory.
  • +
+
cp /proj/introtouppmax/labs/linux_tutorial/ .
+
+
    +
  • Well, that didn’t work. What does the error say?
  • +
  • So... try
  • +
+
cp -r /proj/introtouppmax/labs/linux_tutorial/ .
+
+

-r is for recursive, meaning including files and subdirectories!

+
    +
  • Move to your just created linux_tutorial/
  • +
+
cd linux_tutorial
+
+
    +
  • Make a copy of the file newfile in the same directory:
  • +
+
cp newfile copyfile
+
+

scp — secure copy (remote file copy program)

+
    +
  • Linux/macOS: To copy data to/from Rackham, you can use scp from the terminal on your local machine:
  • +
+

Download from Rackham

+
    +
  • Download
  • +
+
[bob@macbook]$ scp bob@rackham.uppmax.uu.se:~/mydata copyofmydata
+
+[bob@macbook]$ scp bob@rackham.uppmax.uu.se:~/mydata .                      # (keeping file name)
+
+
+

Example

+

Download the file first.txt

+
    +
  • In your local terminal:
  • +
+
[bob@macbook]$ scp <username>@rackham.uppmax.uu.se:~/first.txt .                      # (keeping file name)
+
+
+

Upload to Rackham

+
    +
  • Upload from present directory on local machine to your home directory on cluster.
      +
    • Example:
    • +
    +
  • +
+
[bob@macbook]$ scp myinput bob@rackham.uppmax.uu.se:~/copyofmyinput
+
+[bob@macbook]$ scp myinput bob@rackham.uppmax.uu.se:~/                      # (keeping filename)
+
+
+

Example

+

upload the file first.txt after some modification

+
    +
  1. Open the file you just downloaded in any editor.
  2. +
  3. Add a row, like: A new row
  4. +
  5. Save and quit.
  6. +
  7. Upload your file but save it as second.txt on Rackham. In your local terminal:
  8. +
+
[bob@macbook]$ scp first.txt <username>@rackham.uppmax.uu.se:~/second.txt                     # (new filename)
+
+
+ +

mv — move/rename file

+
    +
  • Moving files works just like copying files:
  • +
  • mv <source> <target>
  • +
  • Move the copy you just made to another place:
  • +
+
mv copyfile ../
+
+
    +
  • Rename it.
  • +
+
mv ../copyfile ../renamedfile
+
+

Archiving

+

tar — archiving and compression

+
    +
  • We’re going to need more files. Let's extract the tar.gz file (a tarred and gzipped file)
  • +
+
tar -vxzf files.tar.gz
+
+
    +
  • The flags mean: + - v: verbosely + - x: extract + - z: gzipped + - f: filename
  • +
  • Order of flags may matter!
      +
    • f should come last, immediately before the archive file name!
    • +
    +
  • +
  • You should see a list of files being extracted
  • +
+
+

Tip

+
    +
  • To compress, use the flag -c instead of -x
  • +
+
$ tar -czvf <tar file> <path/to/directory/file(s)-or-directory>
+
+
+
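
For example (a sketch; the directory name results/ is hypothetical):

$ tar -czvf results.tar.gz results/      # packs results/ into the compressed archive results.tar.gz
+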

Deleting

+

rm — delete files or directories

+
+

Note

+
    +
  • Tip: make "rm" ask if you really want to erase:
  • +
  • Within a session: Type in the command prompt
  • +
+
alias rm='rm -i'
+
+
    +
  • Override asking with
  • +
+
rm -f <>
+
+
    +
  • Do you want this to be the case everytime you start a new session?
      +
    • Edit the file .bashrc in your home directory by adding the above alias line (on any line but the first).
    • +
    +
  • +
  • These steps will also work for mv and cp.
  • +
+
+
    +
  • +

    Deleting files works just like copying or moving them: rm <target>

    +
      +
    • Try it out:
    • +
    +
  • +
+
rm ../renamedfile
+
+rm this_is_empty
+
+
    +
  • hmmmm...
  • +
+

rmdir — delete an empty directory

+
    +
  • We need another command to delete directories
  • +
+
rmdir this_is_empty
+
+rmdir this_has_a_file
+
+
    +
  • +

    Problem again??

    +
  • +
  • +

    Is there a way to use rm to delete directories?

    +
  • +
+
+

Solution

+
    +
  • Recursive commands -r are applied to directories and their contents
  • +
+
$ rm -r this_has_a_file
+
+
+

Help

+

man — manual, look up the right flags

+
    +
  • Nobody can remember whether it’s -R or -r for recursive, or if -f lets you choose a file or forces an action.
  • +
+
man ls
+
+
    +
  • shows you how to use ls and all its options
  • +
  • Type /<keyword> to search for a keyword, use n (forward) and N (backward) to scan through hits.
  • +
  • Scroll with arrows.
  • +
  • Type q to quit.
  • +
+
+

Challenge

+
    +
  • Spend some time now to browse the man pages for the commands you’ve just learned!
  • +
+
+ + +

Let’s get wild with Wildcards

+

Caption

+
ls many_files
+
+ls many_files/*.txt
+
+ls many_files/file_1*1.docx
+
+
    +
  • Want to clean out temporary files ending in .tmp in all the subdirectories?
  • +
+
+

Warning

+
    +
  • It could be wise to do ls -a */*.tmp first to see what will be deleted...
  • +
+
$ rm */*.tmp
+
+
+
+

Challenge

+
    +
  • Exercise: Create a new directory and move all .txt files in many_files to it.
  • +
+
+
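
One possible solution (a sketch; the directory name txt_files is just a suggestion):

$ mkdir txt_files
+
+$ mv many_files/*.txt txt_files/
+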

Reading files

+
    +
  • In Linux, you can (if you wish) also display files without being able to change them
  • +
+
cd old_project
+
+ls
+
+
    +
  • Hmm, which of these files are useful?
  • +
+

cat - concatenate files and print on the standard output

+

cat in action

+
    +
  • cat dumps the contents of files to the terminal as text
  • +
+
cat the_best
+
+
    +
  • Yummy!
  • +
+
cat a
+
+
    +
  • +

    What's this????

    +
  • +
  • +

    Concatenate files with this wizardry:

    +
  • +
+
cat a the_best > combinedfiles.txt
+
+
    +
  • File a is written first and the_best is appended
  • +
+

head — display the top (heading) of a file

+

head in action

+
head a
+
+
    +
  • You can choose how many lines to display (default 10)
  • +
+
head -n 4 a
+
+

tail — display the end of a file

+

tail in action

+
    +
  • Tail is the same as head, but for the other end.
  • +
+
tail -n 5 a
+
+
    +
  • Handy to look at log files or to figure out the structure of a text file.
  • +
+

less — read a whole file

+
    +
  • cat doesn’t really work for long files
  • +
+
 less a
+
+
    +
  • Search with /<keyword> and n/N
  • +
  • Hit q to quit.
  • +
  • scroll with arrows.
  • +
  • +

    man uses less!

    +

    "less is more"

    +
  • +
+

History

+
    +
  • history shows previous commands
  • +
  • You can rerun earlier commands by:
      +
    • copy-pasting and pressing <enter>
    • +
    • !990 will run the command of line 990 of last history output.
    • +
    +
  • +
  • Search for earlier commands you just remember parts of:
      +
    • history | grep 'jobstats'
    • +
    +
  • +
  • More info
  • +
+
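
A sketch of how this can look (the line number and commands are made up):

$ history | grep 'jobstats'
+
+  990  jobstats -p 12345678
+
+$ !990        # reruns line 990, i.e. jobstats -p 12345678
+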

File permissions

+

Caption

+

Example

+
$ ls -l
+
+drwxrwxr-x 2 marcusl marcusl 4096 Sep 19 2012 external_hdd
+-rwxr-xr-x 1 marcusl marcusl 17198 Jul 16 14:12 files.tar.gz
+
+
    +
  • Leading symbol:
      +
    • d directory
    • +
    • - regular file
    • +
    • l symbolic link (more on this tomorrow)
    • +
    • Others exist, but you can ignore them for now
    • +
    +
  • +
+
$ ls -l
+
+  drwxrwxr-x 2 marcusl marcusl 4096 Sep 19 2012 external_hdd
+
+  -rwxr-xr-x 1 marcusl marcusl 17198 Jul 16 14:12 files.tar.gz
+
+
    +
  • +

    Three sets of "rwx" permissions

    +
      +
    • rwx: (r)ead, (w)rite, e(x)ecute
    • +
    • User: the user account that owns the file (usually the one that created it)
    • +
    • Group: the group that owns the file (usually the project group in /proj/xyz or the user’s group elsewhere)
    • +
    • Others: everyone else on the system (literally a thousand strangers)
    • +
    +
  • +
  • +

    r - read

    +
      +
    • Files: Read the contents of the file
    • +
    • Directories: List the files in the directory
    • +
    +
  • +
  • +

    w - write

    +
      +
    • Files: Modify the file
    • +
    • Directories: Add, rename, or delete files in the directory
    • +
    +
  • +
  • +

    x - execute

    +
      +
    • Files: Run the file as a program
    • +
    • Directories: Traverse the directory (e.g. with "cd")
    • +
    +
  • +
+
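
Reading the second line of the example above field by field:

-rwxr-xr-x 1 marcusl marcusl 17198 Jul 16 14:12 files.tar.gz
+
+# -     a regular file
+# rwx   user (marcusl): read, write, execute
+# r-x   group (marcusl): read, execute
+# r-x   others: read, execute
+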

Changing permissions

+

chmod — change file mode bits

+

If you own the file or directory (i.e. you created it), you can modify its permissions.

+
+

Common issues

+
    +
  • Files with w can be modified and destroyed by accident. Protect your data!
  • +
  • If you want to share data or scripts with a person not in your project (e.g. support staff like me), you can!
  • +
  • If you want to keep non-members from even seeing which files you have, you can!
  • +
+
+

Syntax

+

chmod <mode> <files>

+
    +
  • <mode> is of the form: For whom, Modify, What permission(s)
  • +
  • For whom?
      +
    • u: user/owner
    • +
    • g: group, often the members to a certain project
    • +
    • o: others
    • +
    • a: all
    • +
    • if not set, changes are applied for all, except bits set in your umask
    • +
    +
  • +
  • Modify?
      +
    • +: add permissions,
    • +
    • -: remove
    • +
    • =: set equal to
        +
      • = usually causes unmentioned bits to be removed except that a directory's unmentioned set user and group ID bits are not affected.
      • +
      +
    • +
    +
  • +
  • What permissions?
      +
    • r, w, x, i.e. the actual permission
    • +
    +
  • +
+

Examples

+
    +
  • +

    <mode> can be e.g.:

    +
      +
    • u+x : lets You (owner) run a script you just wrote
    • +
    • -w : no write permissions for owner+group
        +
      • warning: if w was already set for others it will be kept!!
      • +
      +
    • +
    • +rw : let user and group members read and edit this file, not others if not already set
    • +
    • =xw : let group members go into your directory and put files there, but not see which files are there, others are not affected
    • +
    • a=xw : set xw for everyone
    • +
    +
  • +
  • +

    chmod takes flags as usual, e.g.

    +
      +
    • -R for recursive (i.e. all files and sub-directories therein)
    • +
    +
  • +
+
+
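
A short sketch of symbolic modes in action (the script name is hypothetical; the other names are from the linux_tutorial directory):

$ chmod u+x analyse.sh             # let yourself run the script you just wrote
+
+$ chmod -w last_years_data         # remove write permission for user and group
+
+$ chmod o-rwx important_results/   # keep non-members out of the directory
+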

chmod 755 style — binary sum — octal bit mask

+
    +
  • +

    Online, you will come across e.g. chmod 755 <file/dir>. What does this mean? It’s an "octal bit mask":

    +
  • +
  • +

    Each digit corresponds to the binary sum for the owner, group and others, respectively.

    +
      +
    • 7 = 4 + 2 + 1 = r + w + x All permissions
    • +
    • 5 = 4 + 0 + 1 = r + x  Read and execute permission
    • +
    +
  • +
  • +

    755 then means all permissions for owner, but limiting write permissions for the group and all others

    +
  • +
  • +

    What number would rw be?

    +
  • +
+
+Solution +

6

+
+
+
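
As a sketch of the octal style (the file names are hypothetical):

$ chmod 755 myscript.sh    # rwxr-xr-x: all permissions for the owner; read and execute for group and others
+
+$ chmod 644 notes.txt      # rw-r--r--: owner reads and writes; group and others only read
+
+$ chmod 600 secret.txt     # rw-------: only the owner may read and write
+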
+chmod — Hands-on +
    +
  • In your locally created linux_tutorial directory, find important files and old saved data that you wouldn’t want to lose (imagine).
  • +
  • Directories: important_results/, old_project/
  • +
  • File: last_years_data
  • +
  • Use chmod to remove write permission from those files and directories (use the -R flag (not -r) to also include the files inside the directories).
  • +
  • Take a moment to play around with chmod and explore the effects of permissions on files and directories.
  • +
+
+Solution +
$ chmod -R -w <target>
+
+
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/login/index.html b/getting_started/login/index.html new file mode 100644 index 000000000..64ec73efe --- /dev/null +++ b/getting_started/login/index.html @@ -0,0 +1,3143 @@ + + + + + + + + + + + + + + + + + + + Log in - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Log in

+

One needs to log in to an UPPMAX cluster to use it.

+

There are two environments one can login to:

+
    +
  • a remote desktop environment
      +
    • using a web browser
    • +
    • using a local ThinLinc client
    • +
    +
  • +
  • a console environment, using an SSH client
  • +
+

The Bianca environments

+
+

The two environments to work on Bianca. +At the left is a remote desktop environment. +At the right is the console environment.

+
+

Because logging in differs between clusters, each cluster +has its own login page:

+ +

Go to those pages for more details.

+

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

Other things to log in to, shown for completeness:

+ + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/login_bianca/index.html b/getting_started/login_bianca/index.html new file mode 100644 index 000000000..58b1785ae --- /dev/null +++ b/getting_started/login_bianca/index.html @@ -0,0 +1,3305 @@ + + + + + + + + + + + + + + + + + + + + + + + Log in to Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Log in to Bianca

+

The Bianca environments

+
+

The two environments to work on Bianca. +At the left is a remote desktop environment. +At the right is the console environment.

+
+

There are multiple UPPMAX clusters one can log in to. +Here it is described how to log in to Bianca:

+

Which way to log in to Bianca

+

After you've fulfilled all prerequisites for using Bianca, +there are many ways to log in to Bianca.

+

Here is the decision tree, with more detailed explanation below it:

+
flowchart TD
+  in_sunet(A.Can you get inside the university networks?)
+  need_remote_desktop(B.Need/prefer a remote desktop?)
+  how_login(C.How to log in?)
+  need_remote_desktop_no_sunet(B.Need/prefer a remote desktop?)
+  how_login_no_sunet(C.How to log in?)
+
+  use_website[1.Use the Bianca remote desktop website]
+  use_password[2.Use a terminal and password to access Bianca directly]
+  use_ssh_keys[3.Use a terminal and SSH keys to access Bianca directly]
+
+  use_website_no_sunet[4.Use the Rackham remote desktop website]
+  use_password_no_sunet[5.Use a terminal and password via Rackham]
+  use_ssh_keys_no_sunet[Use a terminal and SSH keys via Rackham]
+
+  in_sunet --> |yes| need_remote_desktop
+
+  need_remote_desktop --> |no| how_login
+  need_remote_desktop --> |yes| use_website
+
+  how_login --> |Using a password| use_password
+  how_login --> |Using SSH keys| use_ssh_keys
+
+  in_sunet --> |no| need_remote_desktop_no_sunet
+
+  need_remote_desktop_no_sunet --> |no| how_login_no_sunet
+  need_remote_desktop_no_sunet --> |yes| use_website_no_sunet
+
+  how_login_no_sunet --> |Using a password| use_password_no_sunet
+  how_login_no_sunet --> |Using SSH keys| use_ssh_keys_no_sunet
+
+

Decision tree on how to log in to Bianca

+
+

Question A, 'Can you get inside the university networks?' is commonly answered +'yes' for anyone with an email address at a university in Sweden. +The UPPMAX documentation on how to get inside the university networks +should allow anyone to do so.

+

Question B, 'Need/prefer a remote desktop?' is about whether you prefer a +visual/graphical environment to work with Bianca, which will be similar to +what most of us are used to. A 'yes' is more suitable for new users, +although it is considered a clunkier (it responds more slowly to user input) +and clumsier (copy-pasting to it needs multiple mouse clicks) way to work. +A 'no' is more suitable for users comfortable with a terminal, and it works +smoothly.

+
+What does the Bianca remote desktop look like? +

One can pick multiple remote desktop environments, +such as GNOME and XFCE (and KDE, don't pick KDE!).

+

The Bianca remote desktop

+
+

The Bianca XFCE remote desktop environment

+
+

A more populated Bianca remote desktop

+
+

A more populated Bianca XFCE remote desktop

+
+
+
    +
  • A remote desktop environment, also called 'graphical environment', + 'GUI environment', 'ThinLinc environment'
  • +
+
+What does the Bianca console environment look like? +

The Bianca console environment

+
+

The Bianca console environment

+
+
+
    +
  • A console environment, also called 'terminal environment' or 'terminal'
  • +
+

Question C, 'How to log in?' is about how you prefer to log in. The option 'Using a password' is more suitable for new users, as it is easy to set up and understand. However, one does need to type one's password every time one logs in. 'Using SSH keys' is harder to set up, yet more convenient.

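In terminal terms, the two options of question C differ only in the login command and a one-time key setup. A minimal sketch, assuming sven as username and sens2023598 as project name (both placeholders):

```bash
# Option 'Using a password': plain ssh; type your password plus 2FA when prompted
ssh sven-sens2023598@bianca.uppmax.uu.se

# Option 'Using SSH keys': the same command with agent forwarding (-A) enabled,
# which requires a key pair that has been set up beforehand
ssh -A sven-sens2023598@bianca.uppmax.uu.se
```

The pages linked below describe each option in detail.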
+
+Will a local ThinLinc client work too? +

No.

+

One really can only access the Bianca remote desktop environment +via the website.

+
+

Here are the ways to log in to Bianca:

+ +

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
\ No newline at end of file
diff --git a/getting_started/login_bianca_console_password/index.html b/getting_started/login_bianca_console_password/index.html
new file mode 100644
index 000000000..fbebb8c85
--- /dev/null
+++ b/getting_started/login_bianca_console_password/index.html
@@ -0,0 +1,3356 @@
+ Login to the Bianca console environment with a password - UPPMAX Documentation

Login to the Bianca console environment with a password

+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca +using a terminal and a password:

+ +

Procedure

+
+Video: how to use a terminal and SSH to access the Bianca console environment +

This video shows how to use a terminal and SSH to access +the Bianca console environment: YouTube

+
+

1. Get inside the university networks

+

Get inside the university networks.

+
+Forgot how to get within the university networks? +

See the 'get inside the university networks' page here

+
+

2. Use ssh to log in

+

From a terminal, use ssh to log in:

+
ssh [user]-[project name]@bianca.uppmax.uu.se
+
+

For example:

+
ssh sven-sens2023598@bianca.uppmax.uu.se
+
+
+What does this look like (when inside of SUNET)? +
ssh sven-sens2023598@bianca.uppmax.uu.se
+
+

After this, you will be asked for a password. Go to the next step.

+
+
+What does it look like when outside of SUNET? +
$ ssh sven-sens2023598@bianca.uppmax.uu.se
+
+

After this, there is only waiting: nothing happens until the connection times out.

+
+
+Why no -A? +

On Bianca, one can use -A:

+
ssh -A username@bianca.uppmax.uu.se
+
+

This option is only useful when you want to log in to Bianca via the console using an SSH key. As we use passwords here (i.e. no SSH keys) to access Bianca, -A is unused, hence we simplify this documentation by omitting it.

+
+
+Why no -X? +

On Rackham, one can use -X:

+
ssh -X username@rackham.uppmax.uu.se
+
+

However, on Bianca, this so-called +X forwarding is disabled. +Hence, we do not teach it :-)

+
+

3. Type your UPPMAX password with 2FA

+

Type your UPPMAX password, +directly followed by the UPPMAX 2-factor authentication number, +for example verysecret678123, then press enter. +In this case, the password is verysecret and 678123 +is the 2FA number.

+
+What does this look like? +
sven@sven-N141CU:~/GitHubs/UPPMAX-documentation/docs/getting_started$ ssh sven-sens2023598@bianca.uppmax.uu.se
+sven-sens2023598@bianca.uppmax.uu.se's password: 
+
+
+

After this, you'll be asked for another password. Go to the next step.

+

After authenticating with the UPPMAX password and 2FA, you are logged in on Bianca's shared network, on a so-called 'jumphost'.

+

However, you will still need to log in to your own private virtual project cluster. As you are already properly authenticated (i.e. using an UPPMAX password and UPPMAX 2FA), you don't need 2FA anymore.

+
+What is a virtual project cluster? +

As Bianca holds sensitive data, regulations require each Bianca project to be isolated from the others: projects are not allowed to, for example, share the same memory.

+

One way to achieve this would be to build one HPC cluster per project. While this would guarantee isolated project environments, it would be quite impractical.

+

Instead, we create isolated project environments by using software that creates so-called virtual clusters, which behave as if they were physical clusters. Like a physical cluster, a virtual cluster has a guaranteed isolated project environment.

+
+

When you log in to Bianca's shared network, you will get a message showing the status of your project's login node. It can be up and running, or down. If it is down, the virtual cluster is started, which may take some minutes.

+

4. Type your UPPMAX password

+

Type your UPPMAX password, +for example verysecret

+
+What does this look like? +
Last login: Thu Sep 19 08:54:12 2024 from vpnpool188-186.anst.uu.se
+Notice(s) for upcoming maintenance on bianca:
+
+  Lost e-mail support tickets, working again
+  http://status.uppmax.uu.se/2024-09-19/lost-tickets/
+
+****************************************************************************
+* Login node up and running. Redirecting now!                              *
+* Notice! No second factor if you use password.                            *
+* If you use ssh keys, you can get rid of this (second) prompt.            *
+****************************************************************************
+
+sven@sens2023598-bianca.uppmax.uu.se's password: 
+
+
+

5. You are in

+

Enjoy! You are in! Or, to be precise, +you are on the login node of your own virtual project cluster.

+
+What does this look like? +

```bash
 _   _ ____  ____  __  __    _    __  __
| | | |  _ \|  _ \|  \/  |  / \   \ \/ /  |  System:  sens2023598-bianca
| | | | |_) | |_) | |\/| | / _ \   \  /   |  User:    sven
| |_| |  __/|  __/| |  | |/ ___ \  /  \   |
 \___/|_|   |_|   |_|  |_/_/   \_\/_/\_\  |

    User Guides: http://www.uppmax.uu.se/support/user-guides
    FAQ: http://www.uppmax.uu.se/support/faq

    Write to support@uppmax.uu.se, if you have questions or comments.
```

+
+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

By default, this node has one core; hence, if you need more memory or more CPU power, submit a job (interactive or batch) and an idle node will be moved into your project cluster.

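A minimal sketch of both routes (assumptions: sens2023598 as project name, two cores, one hour, and my_script.sh as a placeholder batch script):

```bash
# Interactive: request a node to work on directly
interactive -A sens2023598 -n 2 -t 1:00:00

# Batch: submit a script to the Slurm job scheduler
sbatch -A sens2023598 -n 2 -t 1:00:00 my_script.sh
```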
+

Troubleshooting

+

Here are some common errors and their solutions:

+

Permission denied, please try again

+
Permission denied, please try again.
+
+

Here are the questions we will ask to solve your problem:

+
flowchart TD
+    error[Permission denied, please try again.]
+    correct_password[Is your password correct?]
+    added_2fa[Have you added a 2FA number at the end of your password?]
+    added_correct_2fa[Have you added the correct 2FA number at the end of your password?]
+    in_sunet[Are you within the university networks?]
+    active_bianca_project[Is that Bianca project active?]
+    member_of_bianca_project[Are you a member of that Bianca project?]
+    contact_support[Contact support]
+
+    error --> correct_password
+    error --> in_sunet
+
+    in_sunet --> |yes| active_bianca_project
+
+    correct_password --> |yes| added_2fa
+    added_2fa --> |yes| added_correct_2fa
+    active_bianca_project -->  |yes| member_of_bianca_project
+    member_of_bianca_project --> |yes| contact_support
+    added_correct_2fa --> |yes| contact_support
+
+How do I know my password is correct? +

You don't.

+

It could be a typo: you don't see your password when you type (this is a security measure), so a typo is likely to occur. Also check that 'Caps Lock' is off.

+

It could be that you've forgotten your password. That can happen to all of +us. You can then reset your password at https://suprintegration.uppmax.uu.se/getpasswd

+
+
+What do you mean 'Have you added a 2FA number at the end of your password?'? +

When you type your password, it needs to be followed by a two-factor authentication number.

+

For example, if your password is verysecret and 314159 is the 2FA number, +you should type verysecret314159

+
+
+What is the correct 2FA number? +

The UPPMAX one, titled [username]@UPPMAX, for example sven@UPPMAX.

+

When using UPPMAX, one needs to create other 2FA tokens too, such as for SUPR or the Uppsala VPN. Don't use those numbers to log in to Bianca.

+
+
+How do I know if I am within the university networks? +

If you log in via eduroam, you are within the university networks.

+

When unsure, go to the Bianca remote desktop website at +https://bianca.uppmax.uu.se: +if this page does not load, you are outside of the university networks.

+

See How to get inside of the university networks if you are outside of the university networks.

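A quick command-line version of this check (a sketch; assumes curl is installed):

```bash
# Succeeds only if https://bianca.uppmax.uu.se responds, which it
# only does from inside the university networks
curl --head --silent --connect-timeout 5 https://bianca.uppmax.uu.se \
  && echo "inside the university networks" \
  || echo "outside the university networks (or the site is down)"
```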
+
+
+How do I know if the Bianca project is active? +

A quick way to confirm your Bianca project is active: +go to https://bianca.uppmax.uu.se +and type your username. If the project is displayed, it is active.

+

To confirm your project is active or inactive, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see if your project is active.

+
+
+How do I know if I am a member of the Bianca project? +

A quick way to confirm you are a member of the Bianca project: +go to https://bianca.uppmax.uu.se +and type your username. If the project is displayed, +you are a member of the Bianca project.

+

To confirm you are a member of a project, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see which projects you are a member of.

+
+

See the UPPMAX page on contacting support +on how to contact us.

\ No newline at end of file
diff --git a/getting_started/login_bianca_console_password_no_sunet/index.html b/getting_started/login_bianca_console_password_no_sunet/index.html
new file mode 100644
index 000000000..bb9204e09
--- /dev/null
+++ b/getting_started/login_bianca_console_password_no_sunet/index.html
@@ -0,0 +1,3134 @@
+ Login to the Bianca console environment with a password from outside of the Swedish university networks - UPPMAX Documentation

Login to the Bianca console environment with a password from outside of the Swedish university networks

+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca +using a terminal and a password +from outside of the Swedish university networks.

+

Procedure

+

1. Log in to Rackham's console environment

+

See the UPPMAX documentation on how to log in to Rackham for how to do so.

+

2. From Rackham, log in to Bianca

+

From Rackham (which is inside of the university networks), log in to Bianca. See the UPPMAX documentation on how to log in to Bianca for how to do so.

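In short, the two steps look like this in a terminal (a sketch; sven and sens2023598 are placeholder user and project names):

```bash
# Step 1: log in to Rackham, which is inside the university networks
ssh sven@rackham.uppmax.uu.se

# Step 2: from the Rackham prompt, continue to Bianca
ssh sven-sens2023598@bianca.uppmax.uu.se
```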
\ No newline at end of file
diff --git a/getting_started/login_bianca_console_ssh_key/index.html b/getting_started/login_bianca_console_ssh_key/index.html
new file mode 100644
index 000000000..6db554035
--- /dev/null
+++ b/getting_started/login_bianca_console_ssh_key/index.html
@@ -0,0 +1,3246 @@
+ Login to the Bianca console environment using SSH keys - UPPMAX Documentation

Login to the Bianca console environment using SSH keys

+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca +using a terminal and an SSH key pair.

+

1. Get inside SUNET

+

When inside SUNET, one can access a Bianca console environment +using SSH and SSH keys.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

This is considered a bit harder to set up, but one only needs to type one password to log in to Bianca. If you don't mind typing your UPPMAX password twice, an easier setup is to log in to the Bianca console environment with a password.

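The one-time key setup roughly looks like this (a minimal sketch, assuming an ed25519 key at the default path; the public key must also be registered before Bianca accepts it):

```bash
# One-time setup: create a key pair and load it into your SSH agent,
# so that agent forwarding (-A, used below) has a key to forward
ssh-keygen -t ed25519
eval "$(ssh-agent -s)"
ssh-add ~/.ssh/id_ed25519
```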
+

2. Use ssh to log in

+

From a terminal, use ssh to log in:

+
ssh -A [user]-[project name]@bianca.uppmax.uu.se
+
+

For example:

+
ssh -A sven-sens2023598@bianca.uppmax.uu.se
+
+
+What does it look like when outside of SUNET? +

Here you can see how this looks when outside of SUNET.

+

Spoiler: quite dull, as nothing happens until there is a timeout.

+
+
+Why no -X? +

On Rackham, one can use -X:

+
ssh -X username@rackham.uppmax.uu.se
+
+

However, on Bianca, this so-called +X forwarding is disabled. +Hence, we do not teach it :-)

+
+

3. Type your UPPMAX password and 2FA

+

Type your UPPMAX password, +directly followed by the UPPMAX 2-factor authentication number, +for example verysecret678123, then press enter. +In this case, the password is verysecret and 678123 +is the 2FA number.

+

4. You are in

+

Enjoy! You are in! To be precise, +you are on a Bianca login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

In a Bianca console environment:

+
    +
  • Text display is limited to 50kBit/s. + This means that if you create a lot of text output, + you will have to wait some time before you get your prompt back.
  • +
  • Cut, copy and paste work as usual. + Be careful to not copy-paste sensitive data!
  • +
+
+Why does one need two passwords? +

The first password is needed to get into the shared Bianca environment. This password consists of your UPPMAX password directly followed by an UPPMAX 2FA number.

+

The second password is needed to go to the login node +of a project's virtual cluster.

+
flowchart TD
+
+    %% Give a white background, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+    classDef focus_node fill:#fff,color:#000,stroke:#000,stroke-width:4px
+
+    subgraph sub_bianca_shared_env[Bianca shared network]
+      bianca_shared_console[Bianca console environment login]
+      bianca_shared_remote_desktop[Bianca remote desktop login]
+      subgraph sub_bianca_private_env[The project's private virtual project cluster]
+        bianca_private_console[Bianca console environment]
+        bianca_private_remote_desktop[Bianca remote desktop]
+        bianca_private_terminal[Terminal]
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    %% style sub_inside fill:#fcc,color:#000,stroke:#fcc
+    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc
+    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc
+
+    %% Shared Bianca
+    bianca_shared_console --> |UPPMAX password|bianca_private_console
+    bianca_shared_remote_desktop-->|UPPMAX password|bianca_private_remote_desktop
+
+    %% Private Bianca
+    bianca_private_console---|is a|bianca_private_terminal
+    bianca_private_remote_desktop-->|must also use|bianca_private_terminal
+
\ No newline at end of file
diff --git a/getting_started/login_bianca_console_ssh_key_no_sunet/index.html b/getting_started/login_bianca_console_ssh_key_no_sunet/index.html
new file mode 100644
index 000000000..d61aac7ca
--- /dev/null
+++ b/getting_started/login_bianca_console_ssh_key_no_sunet/index.html
@@ -0,0 +1,3134 @@
+ Login to the Bianca console environment using SSH keys from outside of the Swedish university networks - UPPMAX Documentation

Login to the Bianca console environment using SSH keys from outside of the Swedish university networks

+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca +using a terminal and an SSH key pair +from outside of the Swedish university networks.

+

Procedure

+

1. Log in to Rackham's console environment

+

See the UPPMAX documentation on how to log in to Rackham for how to do so.

+

2. From Rackham, log in to Bianca

+

From Rackham (which is inside of the university networks), log in to Bianca. See the UPPMAX documentation on how to log in to Bianca for how to do so.

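In short, the two steps look like this in a terminal (a sketch; sven and sens2023598 are placeholders, and -A assumes your SSH key is loaded in a local SSH agent):

```bash
# Step 1: log in to Rackham with agent forwarding (-A) enabled
ssh -A sven@rackham.uppmax.uu.se

# Step 2: from the Rackham prompt, continue to Bianca with the same key
ssh -A sven-sens2023598@bianca.uppmax.uu.se
```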
\ No newline at end of file
diff --git a/getting_started/login_bianca_remote_desktop_local_thinlinc_client/index.html b/getting_started/login_bianca_remote_desktop_local_thinlinc_client/index.html
new file mode 100644
index 000000000..e01d6db93
--- /dev/null
+++ b/getting_started/login_bianca_remote_desktop_local_thinlinc_client/index.html
@@ -0,0 +1,3109 @@
+ Login to the Bianca remote desktop environment via a ThinLinc client - UPPMAX Documentation
\ No newline at end of file
diff --git a/getting_started/login_bianca_remote_desktop_website/index.html b/getting_started/login_bianca_remote_desktop_website/index.html
new file mode 100644
index 000000000..859a7eaf2
--- /dev/null
+++ b/getting_started/login_bianca_remote_desktop_website/index.html
@@ -0,0 +1,3317 @@
+ Log in to the Bianca remote desktop environment website - UPPMAX Documentation

Log in to the Bianca remote desktop environment website

+

The Bianca remote desktop environment

+
+

The Bianca remote desktop environment

+
+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca +using a remote desktop that is accessible from a webbrowser.

+

Procedure

+
+Prefer a video? +

See this page explained in +a YouTube video here

+
+

1. Get inside SUNET

+

As Bianca is an HPC cluster for sensitive data, +one needs to be within SUNET to be able to access her.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

Bianca does not support any so-called +X forwarding (unlike Rackham), +so instead UPPMAX maintains a website that uses +ThinLinc to get a full remote desktop environment. +All you should need is a rather modern browser on any platform.

+
+What does it look like to try to access a remote desktop from outside of SUNET? +

Nothing will appear in your browser

+
+

When accessing the Bianca UPPMAX login website from outside of SUNET, +nothing will appear in your browser.

+
+

In this video, you can see how this looks when outside of SUNET.

+

It looks quite dull, as nothing happens until there is a timeout.

+
+
+Will a local ThinLinc client work too? +

No.

+

One really can only access the Bianca remote desktop environment via a website.

+
+

When inside SUNET, one can access a remote desktop environment using a website:

+

2. Go to https://bianca.uppmax.uu.se

+

When inside SUNET, +in your web browser, go to https://bianca.uppmax.uu.se.

+
+What does it look like when outside of SUNET? +

Nothing will appear in your browser

+
+

When accessing the Bianca UPPMAX login website from outside of SUNET, +nothing will appear in your browser.

+
+

In this video, you can see how this looks when outside of SUNET.

+

It looks quite dull, as nothing happens until there is a timeout.

+
+

3. Fill in the first dialog

+

Fill in the first dialog.

+

Do use the UPPMAX 2-factor authentication (i.e. not SUPR!)

+
+How do I setup 2-factor authentication? +

See the guide at 2-factor authentication +to setup an UPPMAX 2-factor authentication method.

+

You really need to use the UPPMAX 2-factor authentication, i.e. not the SUPR one, to log in to Bianca.

+

Screenshot of a two-factor authentication app

+
+

Screenshot of a two-factor authentication app. +Use the 2-factor authentication called 'UPPMAX' +to access Bianca

+
+
+
+What does that web page look like? +

Bianca login, first dialog

+
+

The first page of https://bianca.uppmax.uu.se

+
+
+

Sometimes a webpage will be shown that asks you to wait. +Simply do that :-)

+
+What does that web page look like? +

No c Web Access active

+
+

No c Web Access active +The login node for your project cluster is probably asleep. Boot initiated. The startup can take from 2 to 8 minutes.

+

This page will attempt to automatically reload. If nothing happens even after multiple minutes, you can do so manually. It is a bit more controlled in text mode.

+

When this takes a long time, your original second factor code might expire. In that scenario, you'll be redirected to the first login page again.

+
+

This is the webpage that is shown when a login node needs to be created.

+
+

4. Fill in the second dialog, using your regular password

+

Fill in the second dialog, using your regular password (i.e. no need for two-factor authentication).

+
+What does that web page look like? +

Bianca login, second dialog

+
+

The second Bianca remote desktop login dialog. +Note that it uses ThinLinc to establish this connection

+
+
+

5. Picking a remote desktop flavor, but not KDE

+

When picking a remote desktop flavor, pick GNOME or XFCE; avoid picking KDE.

+
+What does that look like? +

Here you are told you will need to pick a remote desktop flavor

+
+

Here you are told you will need to pick a remote desktop flavor

+
+

Pick a remote desktop flavor

+
+

Here you are asked to pick a remote desktop flavor, +with Xfce as the default. +Pick any, except KDE.

+
+
+
+

Avoid choosing KDE

+

Avoid choosing the KDE desktop, as it gives problems when running interactive sessions.

+

Instead, we recommend GNOME or XFCE.

+
+

6. You are in

+

Enjoy! You are in: you are now on a Bianca login node.

+
+How do I copy-paste text? +

The Bianca remote desktop environment via a website +uses ThinLinc.

+

At the ThinLinc page you can find +how to work with its interface.

+
+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+
+What is the difference between 'disconnect session' and 'end session'? +

'disconnect session' will save the current state of your session. When you connect again, you will get the remote desktop back exactly in the same place you left the system. For example: if you were editing a file before disconnecting, your prompt will be in the same place you left it.

+

'end session' will not save the current state of your session. +Instead, you will start with a clean slate at the next login.

+
+

Bianca automatically disconnects after 30 minutes of inactivity. In the future, we may implement some kind of "automatic log out from active graphical session".

+
flowchart TD
+
+    subgraph sub_inside[IP inside SUNET]
+
+      user(User)
+
+      subgraph sub_bianca_shared_env[Bianca shared network]
+        bianca_shared_remote_desktop[Bianca remote desktop login]
+        subgraph sub_bianca_private_env[The project's private virtual project cluster]
+          bianca_private_remote_desktop[Bianca remote desktop]
+
+          %% Ensure the innermost square gets big enough
+          END:::hidden
+        end
+      end
+    end
+
+    %% Inside SUNET
+    user-->|Bianca website, UPPMAX password and 2FA|bianca_shared_remote_desktop
+
+    bianca_shared_remote_desktop --> |UPPMAX password| bianca_private_remote_desktop
+

Troubleshooting

+

Access denied

+
+What does that look like? +

Log in to Bianca's remote desktop environment and getting an 'Access denied' error

+
+

Contact support.

+

Authentication failed

+
+What does that look like? +

Log in to Bianca's remote desktop environment and getting an 'Authentication failed' error

+
+

Contact support.

\ No newline at end of file
diff --git a/getting_started/login_bianca_remote_desktop_website_no_sunet/index.html b/getting_started/login_bianca_remote_desktop_website_no_sunet/index.html
new file mode 100644
index 000000000..a531e5227
--- /dev/null
+++ b/getting_started/login_bianca_remote_desktop_website_no_sunet/index.html
@@ -0,0 +1,3152 @@
+ Log in to the Bianca remote desktop environment website from outside of the Swedish university networks - UPPMAX Documentation

Log in to the Bianca remote desktop environment website from outside of the Swedish university networks

+

The Bianca remote desktop environment via the Rackham remote desktop environment

+
+

The Bianca remote desktop environment via the Rackham remote desktop environment

+
+

There are multiple ways to log in to Bianca.

+

This page describes how to log in to Bianca using a remote desktop that is accessible from a web browser from outside of the Swedish university networks.

+

Procedure

+
+Prefer a video? +

See this page explained in +a YouTube video here

+
+

1. Go to https://rackham-gui.uppmax.uu.se

+

In a web browser, go to https://rackham-gui.uppmax.uu.se.

+
    +
  • In the first field, fill in your UPPMAX username, e.g. sven
  • +
  • In the second field, fill in your UPPMAX password (e.g. password) and your UPPMAX 2FA number (e.g. 123456) together, without a space (e.g. password123456)
  • +
+
+What does that page look like? +

The page looks like this

+
+

After login, you will be on the Rackham remote desktop environment.

+

2. Log in to the Bianca remote desktop environment website

+

In the web browser of the Rackham remote desktop environment +(which is inside the university networks), +follow the steps at how to log in to the Bianca remote desktop environment website.

\ No newline at end of file
diff --git a/getting_started/login_dardel/index.html b/getting_started/login_dardel/index.html
new file mode 100644
index 000000000..77592ad63
--- /dev/null
+++ b/getting_started/login_dardel/index.html
@@ -0,0 +1,3310 @@
+ Log in to Dardel (at PDC) - UPPMAX Documentation

Log in to Dardel

+

There are multiple clusters one can log in to. Here we describe how to log in to Dardel.

+

Procedure

+
+Prefer a video? +

Go to a YouTube video on how to log in to Dardel +to view what to do from step 3 and onwards.

+
+

First, we are here to help. +Please contact support if you run into problems +when trying the guide below.

+

Note that step 1 requires some hours of waiting +and step 2 requires an overnight wait.

+
flowchart TD
+  get_supr_project[1.Access to a SUPR project with Dardel]
+  get_pdc_account[2.Access to a PDC account]
+  create_ssh_key[3.Create temporary SSH keys]
+  add_ssh_key[4.Add the SSH keys to the PDC Login Portal]
+  login[5. Login]
+
+  get_supr_project --> |needed for| get_pdc_account
+
+  create_ssh_key --> |needed for| add_ssh_key
+  get_pdc_account --> |needed for| add_ssh_key
+  add_ssh_key --> |needed for| login
+
+

Overview of the migration process. +Note that step 1 requires some hours of waiting +and step 2 requires an overnight wait.

+
+

1. Get access to a SUPR project with Dardel

+

The first step is to get access to a SUPR project with Dardel. This is described at PDC's page on getting access to Dardel. You will get an email when you are added to a project; this can take some hours.

+
+How do I know I have access to a Dardel project? +

Log in to https://supr.naiss.se/. If there is a PDC project, you may have access to a project with Dardel.

+

Example user that has access to a PDC project

+
+

Example user that has access to a PDC project

+
+

If you have a PDC project that does not obviously use Dardel, click on the project to go to the project overview.

+

Example PDC project overview

+
+

Example PDC project overview

+
+

From there, scroll down to 'Resources'. +If you see 'Dardel' among the compute resources, +you have confirmed you have access to a Dardel project.

+

Resources from an example PDC project

+
+

Resources from an example PDC project

+
+
+

2. Get a PDC account via SUPR

+

Get a PDC account via SUPR. +This is described at the PDC page on getting access. +You will get a PDC account overnight.

+
+How do I know I have a PDC account? +

Log in to https://supr.naiss.se/ and click on 'Accounts' in the main menu bar at the left.

+

If you see 'Dardel' among the resources, and status 'Enabled' +in the same row, you have a PDC account!

+

Example of a user having an account at PDC's Dardel HPC cluster

+
+

Example of a user having an account at PDC's Dardel HPC cluster

+
+
+
+How do I find out my PDC username? +

In the PDC login portal, after logging +in, you can see your Dardel username in the top-right corner:

+

PDC login portal with username and key

+
+

Example screenshot of the PDC login portal. +The Dardel username of this user is svenbi

+
+
+

3. Create SSH key pair

+

Create an SSH key and add it to the PDC Login Portal.

+
    +
  • Create the passwordless SSH key in a Linux terminal (e.g. from Rackham):
  • +
+
module load darsync
+
+
darsync sshkey
+
+

4. Add the public key to the PDC Login Portal

+

When creating the SSH key pair, darsync will already +display the public key.

+

If, however, you missed it, +you can view the public SSH key again; in a terminal logged into Rackham:

+
cat ~/id_ed25519_pdc.pub
+
+
+What does that look like? +

The text will look similar to this:

+
ssh-ed25519 AAAA69Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se
+
+
+

Open the PDC Login Portal.

+

Follow our step-by-step instructions on how to add SSH keys.

+

5. Login

+ +

On a terminal, do:

+
ssh -X [username]@dardel.pdc.kth.se
+
+

where [username] is your PDC username, for example ssh -X sven@dardel.pdc.kth.se.

+
+Why the -X? +

The -X is for so-called X forwarding. +It allows you to view graphical things, +such as viewing plots or running graphical programs

+
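A quick way to test that X forwarding works once you are logged in (a sketch; assumes a small X program such as xeyes is installed on Dardel):

```bash
xeyes  # if X forwarding works, a window pops up on your local screen
```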
\ No newline at end of file
diff --git a/getting_started/login_rackham/index.html b/getting_started/login_rackham/index.html
new file mode 100644
index 000000000..f84ca6fa0
--- /dev/null
+++ b/getting_started/login_rackham/index.html
@@ -0,0 +1,3266 @@
+ Log in to Rackham - UPPMAX Documentation

Log in to Rackham

+

There are multiple UPPMAX clusters one can log in to. +Here we describe how to log in to Rackham.

+ +

Which way to login?

+

There are multiple ways to log in to Rackham:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
| Login | Description | Screenshot |
| --- | --- | --- |
| Website | Remote desktop, no installation needed, slow | The Rackham remote desktop via the website |
| Terminal | Console environment, recommended | The Rackham console environment |
| Local ThinLinc client | Remote desktop, recommended, needs installation | The Rackham remote desktop via a local ThinLinc client |
+

Here is a decision tree to determine which way to log in:

+
flowchart TD
+  need_gui(Need to run a graphical program?)
+  use_terminal[Use a terminal]
+  use_website[Use the remote desktop website]
+  need_easy_or_speedy(Need easiest or fastest?)
+  use_local[Use a local ThinLinc client]
+
+  need_gui --> |no| use_terminal
+  need_gui --> |yes| need_easy_or_speedy
+  need_easy_or_speedy --> |easiest| use_website
+  need_easy_or_speedy --> |fastest| use_local
+
+  how_login(How to log in?)
+
+  use_password[Use password. Start here]
+  use_ssh_keys[Use SSH keys. No more password needed]
+
+  use_terminal --> how_login
+  how_login --> use_password
+  how_login --> use_ssh_keys
+

The procedures can be found at:

+ +

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
\ No newline at end of file
diff --git a/getting_started/login_rackham_console_password/index.html b/getting_started/login_rackham_console_password/index.html
new file mode 100644
index 000000000..03badf18e
--- /dev/null
+++ b/getting_started/login_rackham_console_password/index.html
@@ -0,0 +1,3196 @@
+ Login to the Rackham console environment with a password - UPPMAX Documentation

Login to the Rackham console environment with a password

+

There are multiple ways to log in to Rackham. +This page describes how to do so using a terminal and a password.

+

If you want to get rid of using a password every time, +see login to the Rackham console environment with an SSH key.

+

Procedure

+
+Prefer a video? +

This procedure is also shown by this YouTube video.

+
+

1. Use ssh to log in

+

From a terminal, use ssh to log in:

+
ssh -X [username]@rackham.uppmax.uu.se
+
+

[username] is your UPPMAX username, for example, sven, +resulting in:

+
ssh -X sven@rackham.uppmax.uu.se
+
+

-X enables so-called X forwarding, +which allows you to run programs that require light graphics, +such as eog to display an image.

+
+Can I log in without -X? +

Yes!

+

If you do not need X forwarding +to run programs that require light graphics, +omitting the -X is just fine.

+
+
+Why no -A? +

On Rackham, one can use -A:

+
ssh -A username@rackham.uppmax.uu.se
+
+

This option is only useful when you want to log in to Rackham via the console using an SSH key. As we use passwords here (i.e. no SSH keys) to access Rackham, -A is unused, hence we simplify this documentation by omitting it.

+
+

2. Type your UPPMAX password

+

Type your UPPMAX password and press enter. +You will see no asterisks to indicate how many +characters you've typed in.

+

If you are outside +the university networks +you will be asked for your UPPMAX 2-factor authentication number.

+

3. You are in

+

Enjoy! You are in! Or, to be precise, +you are in your home folder on a Rackham login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
\ No newline at end of file
diff --git a/getting_started/login_rackham_console_ssh_key/index.html b/getting_started/login_rackham_console_ssh_key/index.html
new file mode 100644
index 000000000..3511fca75
--- /dev/null
+++ b/getting_started/login_rackham_console_ssh_key/index.html
@@ -0,0 +1,3190 @@
+ Login to the Rackham console environment using SSH keys - UPPMAX Documentation

Login to the Rackham console environment using SSH keys

+

There are multiple ways to log in to Rackham.

+

This page describes how to log in to Rackham +using a terminal and an SSH key pair.

+

1. Get inside SUNET

+

When inside SUNET, one can access a Rackham console environment +using SSH and SSH keys.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

This is considered a bit harder to set up, but one only needs to type one password to log in to Rackham. If you don't mind typing your UPPMAX password twice, an easier setup is to log in to the Rackham console environment with a password.

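The one-time key setup roughly looks like this (a minimal sketch; assumes an ed25519 key at the default path and sven as a placeholder username):

```bash
# Create a key pair locally, then register the public key on Rackham;
# after this, logins use the key instead of a password
ssh-keygen -t ed25519
ssh-copy-id sven@rackham.uppmax.uu.se
```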
+

2. Use ssh to log in

+

From a terminal, use ssh to log in:

+
ssh -AX [user]@rackham.uppmax.uu.se
+
+

For example:

+
ssh -AX sven@rackham.uppmax.uu.se
+
+

3. Type your UPPMAX password

+

Type your UPPMAX password.

+

4. You are in

+

Enjoy! You are in! To be precise, you are on a Rackham login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

In a Rackham console environment:

+
    +
  • Text display is limited to 50kBit/s. + This means that if you create a lot of text output, + you will have to wait some time before you get your prompt back.
  • +
  • Cut, copy and paste work as usual. + Be careful to not copy-paste sensitive data!
  • +
\ No newline at end of file
diff --git a/getting_started/login_rackham_remote_desktop_local_thinlinc_client/index.html b/getting_started/login_rackham_remote_desktop_local_thinlinc_client/index.html
new file mode 100644
index 000000000..15969827e
--- /dev/null
+++ b/getting_started/login_rackham_remote_desktop_local_thinlinc_client/index.html
@@ -0,0 +1,3218 @@
+ Log in to Rackham's remote desktop environment using a local ThinLinc client - UPPMAX Documentation

Log in to Rackham's remote desktop environment using a local ThinLinc client

+

The Rackham remote desktop environment via a local ThinLinc client

+

There are multiple ways to log in to Rackham. This page describes how to log in to its remote desktop environment via a local ThinLinc client.

+

Procedure

+
+Prefer a video? +

This procedure is also shown by this YouTube video.

+
+

1. Install ThinLinc

+

Install ThinLinc. For help, see the UPPMAX page on ThinLinc.

+

2. Start ThinLinc

+

In the ThinLinc login dialog, set the server name to rackham-gui.uppmax.uu.se.

+
+What does that look like? +

ThinLinc login

+
+

The ThinLinc login dialog

+
+
+
+Why not use https://www.rackham-gui.uppmax.uu.se? +

Because that does not work :-)

+
+

3. Forward the ThinLinc Welcome dialog

+

On the ThinLinc 'Welcome' dialog, click 'Forward'

+
+What does that look like? +

The ThinLinc 'Welcome' dialog

+
+

4. Select a ThinLinc profile

+

On the ThinLinc 'Select profile' dialog, select a profile:

+ + + + + + + + + + + + + + + + + + + + + +
| Profile | Recommendation |
| --- | --- |
| GNOME | Recommended |
| KDE | Avoid |
| XFCE | Recommended |
+
+

Avoid choosing KDE

+

Avoid choosing the KDE desktop, as it gives problems +when running interactive sessions.

+

Instead, we recommend GNOME or XFCE.

+
+
+What does that look like? +

The ThinLinc 'Select profile' dialog

+
+

Here you are asked to pick a remote desktop flavor, +with Xfce as the default. +Pick any, except KDE.

+
+
+

5. You are in

+

You are in! Well done!

+

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+
+What does that look like? +

The Rackham remote desktop environment via a local ThinLinc client

+
+

Usage

+

For tips on how to work with this environment, +see the UPPMAX ThinLinc page.

\ No newline at end of file
diff --git a/getting_started/login_rackham_remote_desktop_website/index.html b/getting_started/login_rackham_remote_desktop_website/index.html
new file mode 100644
index 000000000..95550a753
--- /dev/null
+++ b/getting_started/login_rackham_remote_desktop_website/index.html
@@ -0,0 +1,3170 @@
+ Log in to Rackham's remote desktop via a webbrowser - UPPMAX Documentation

Log in to Rackham's remote desktop via a webbrowser

+

Rackham's remote desktop environment

+
+

Rackham's remote desktop environment via a web browser

+
+

There are multiple ways to log in to Rackham. This page describes how to log in to its remote desktop environment via a web browser.

+

Procedure

+
+Prefer a video? +

This procedure is also shown by this YouTube video.

+
+

This is a procedure with one step. Most of the work is fulfilling all Rackham usage prerequisites.

+

1. Go to https://rackham-gui.uppmax.uu.se

+

In a web browser, go to https://rackham-gui.uppmax.uu.se.

+
    +
  • In the first field, fill in your UPPMAX username, e.g. sven
  • +
  • In the second field, fill in your UPPMAX password (e.g. password) and your UPPMAX 2FA number (e.g. 123456) together, without a space (e.g. password123456)
  • +
+
+What does that page look like? +

The page looks like this

+
+

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

Usage

+

For tips on how to work with this environment, +see the UPPMAX ThinLinc page +(as that software is used to do the heavy lifting for that website).

\ No newline at end of file
diff --git a/getting_started/login_snowy/index.html b/getting_started/login_snowy/index.html
new file mode 100644
index 000000000..e3a398fb0
--- /dev/null
+++ b/getting_started/login_snowy/index.html
@@ -0,0 +1,3166 @@
+ Log in to Snowy - UPPMAX Documentation

Log in to Snowy

+

There are multiple UPPMAX clusters one can log in to. Here we describe how to log in to Snowy.

+

One needs to be allowed to use Snowy. These prerequisites describe what is needed before one can use Snowy.

+

To make Snowy do a calculation, one needs to log in to a Rackham login node, +which is described here.

+

After login, you will be on a login node.

+
+

How to behave on a login node

+

On a login node, one can and should do simple things only: +it is a resource shared with all other users on that node.

+

If you need to do more intense calculations, +use the Slurm job scheduler.

+

If you need to do more intense calculations interactively, +use an interactive node.

+
+

After logging in, one can start calculations on Snowy using interactive or sbatch:

+ +
graph LR
+
+  subgraph "Snowy"
+    snowy_calculation_node[Calculation nodes]
+  end
+
+
+  subgraph "Rackham"
+    login_node[Login node]
+  end
+
+  login_node --> |interactive or sbatch| snowy_calculation_node
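From a Rackham login node, both routes look roughly like this (a sketch; the project name, resources, and script name are placeholders, and -M snowy is the Slurm flag that targets the Snowy cluster):

```bash
# Interactive session on Snowy
interactive -A naiss2024-22-49 -M snowy -n 1 -t 1:00:00

# Batch job on Snowy
sbatch -A naiss2024-22-49 -M snowy my_script.sh
```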
\ No newline at end of file
diff --git a/getting_started/project/index.html b/getting_started/project/index.html
new file mode 100644
index 000000000..6ad7175ca
--- /dev/null
+++ b/getting_started/project/index.html
@@ -0,0 +1,3260 @@
+ UPPMAX project - UPPMAX Documentation

UPPMAX project

+

To use UPPMAX resources, one needs:

  • an UPPMAX user account
  • an UPPMAX project

This page is about UPPMAX projects:

+ +

View your UPPMAX projects

+

SUPR (the 'Swedish User and Project Repository') +is the website that allows one to request access to Swedish computational +resources and to get an overview of the requested resources.

+
+What does the SUPR website look like? +

First SUPR page

+
+

First SUPR page

+
+

SUPR 2FA login

+
+

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

+
+
+

After logging in, the SUPR +website will show all projects you are a member of, +under the 'Projects' tab.

+
+What does the 'Projects' tab of the SUPR website look like? +

An example overview of SUPR projects

+
+

An example overview of SUPR projects

+
+
+

How to convert my project name to an account name for the job scheduler?

+

Here is a simple conversion table:

+ + + + + + + + + + + + + + + + + +
| Project name | Account name for the job scheduler |
| --- | --- |
| NAISS 2024/22-49 | naiss2024-22-49 |
| sens2017625 | sens2017625 |
+
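The pattern in the table (drop spaces, replace '/' with '-', lowercase) can also be expressed as a shell one-liner; a sketch of the rule, not an official tool:

```bash
# Converts 'NAISS 2024/22-49' to 'naiss2024-22-49'
echo 'NAISS 2024/22-49' | tr -d ' ' | tr '/' '-' | tr '[:upper:]' '[:lower:]'
```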

Else, on an UPPMAX cluster do:

+
cd /proj
+ls
+
+

and look for a project folder that resembles the name of your project. +The name of that folder is the name of your account.

+
+What does that look like? +

Here is part of the output:

+
naiss2023-22-57           naiss2024-22-227  snic2015-10-19           snic2018-8-136  snic2020-15-16   snic2021-22-513   snic2022-22-1164  snic2022-5-333    uppstore2019112
+naiss2023-22-570          naiss2024-22-24   snic2015-10-25           snic2018-8-139  snic2020-15-161  snic2021-22-517   snic2022-22-117   snic2022-5-334    uppstore2019113
+naiss2023-22-574          naiss2024-22-244  snic2015-10-8            snic2018-8-14   snic2020-15-162  snic2021-22-521   snic2022-22-1172  snic2022-5-339    uppstore2019114
+naiss2023-22-577          naiss2024-22-247  snic2015-1-142           snic2018-8-141  snic2020-15-163  snic2021-22-522   snic2022-22-1173  snic2022-5-34     uppstore2019115
+naiss2023-22-578          naiss2024-22-253  snic2015-1-164           snic2018-8-143  snic2020-15-164  snic2021-22-525   snic2022-22-1178  snic2022-5-343    uppstore2019117
+naiss2023-22-58           naiss2024-22-257  snic2015-1-176           snic2018-8-144  snic2020-15-165  snic2021-22-526   snic2022-22-1179  snic2022-5-364    uppstore2019118
+naiss2023-22-580          naiss2024-22-26   snic2015-1-177           snic2018-8-145  snic2020-15-17   snic2021-22-529   snic2022-22-1180  snic2022-5-373    uppstore2019119
+naiss2023-22-582          naiss2024-22-270  snic2015-1-201           snic2018-8-146  snic2020-15-172  snic2021-22-530   snic2022-22-1181  snic2022-5-376    uppstore2019120
+naiss2023-22-583          naiss2024-22-275  snic2015-1-204           snic2018-8-147  snic2020-15-173  snic2021-22-535   snic2022-22-1184  snic2022-5-392    uppstore2019121
+naiss2023-22-586          naiss2024-22-281  snic2015-1-228           snic2018-8-148  snic2020-15-175  snic2021-22-537   snic2022-22-1186  snic2022-5-403    uppstore2019123
+naiss2023-22-590          naiss2024-22-282  snic2015-1-242           snic2018-8-149  snic2020-15-177  snic2021-22-538   snic2022-22-1194  snic2022-5-407    uppstore2021-23-134
+naiss2023-22-598          naiss2024-22-295  snic2015-1-259           snic2018-8-15   snic2020-15-178  snic2021-22-541   snic2022-22-1195  snic2022-5-408    uu_1dl550_2021
+naiss2023-22-600          naiss2024-22-299  snic2015-1-268           snic2018-8-150  snic2020-15-179  snic2021-22-544   snic2022-22-1197  snic2022-5-415    uucompbiochem
+naiss2023-22-608          naiss2024-22-3    snic2015-1-281           snic2018-8-151  snic2020-15-18   snic2021-22-546   snic2022-22-1198  snic2022-5-42     var_inf_sim_alex
+naiss2023-22-62           naiss2024-22-301  snic2015-1-315           snic2018-8-152  snic2020-15-182  snic2021-22-547   snic2022-22-12    snic2022-5-423    viher_snic2022
+naiss2023-22-620          naiss2024-22-303  snic2015-1-33            snic2018-8-153  snic2020-15-183  snic2021-22-550   snic2022-22-1200  snic2022-5-428    viscaria_pilot
+naiss2023-22-621          naiss2024-22-305  snic2015-1-345           snic2018-8-154  snic2020-15-185  snic2021-22-554   snic2022-22-1207  snic2022-5-432    vrognas
+naiss2023-22-623          naiss2024-22-307  snic2015-1-364           snic2018-8-155  snic2020-15-186  snic2021-22-555   snic2022-22-1208  snic2022-5-443    wamr
+naiss2023-22-624          naiss2024-22-308  snic2015-1-37            snic2018-8-156  snic2020-15-188  snic2021-22-557   snic2022-22-121   snic2022-5-451    wave_energy_parks
+naiss2023-22-627          naiss2024-22-310  snic2015-1-398           snic2018-8-157  snic2020-15-189  snic2021-22-559   snic2022-22-1214  snic2022-5-454    wheatrnaseq
+naiss2023-22-632          naiss2024-22-319  snic2015-1-399           snic2018-8-158  snic2020-15-19   snic2021-22-56    snic2022-22-1216  snic2022-5-461    wheatrnaseq2
+naiss2023-22-633          naiss2024-22-322  snic2015-1-410           snic2018-8-159  snic2020-15-190  snic2021-22-562   snic2022-22-1224  snic2022-5-466    wiosym
+naiss2023-22-634          naiss2024-22-324  snic2015-1-451           snic2018-8-16   snic2020-15-191  snic2021-22-563   snic2022-22-1227  snic2022-5-484    xfooli
+naiss2023-22-64           naiss2024-22-326  snic2015-1-466           snic2018-8-161  snic2020-15-192  snic2021-22-564   snic2022-22-1228  snic2022-5-503    yeast1000storage
+naiss2023-22-640          naiss2024-22-330  snic2015-1-475           snic2018-8-162  snic2020-15-193  snic2021-22-565   snic2022-22-123   snic2022-5-506    yeast-genomics
+naiss2023-22-648          naiss2024-22-332  snic2015-1-52            snic2018-8-163  snic2020-15-195  snic2021-22-569   snic2022-22-1231  snic2022-5-51     yeast_hybrid_barcode
+naiss2023-22-652          naiss2024-22-339  snic2015-16-12           snic2018-8-164  snic2020-15-196  snic2021-22-570   snic2022-22-1233  snic2022-5-52     zengkun
+naiss2023-22-654          naiss2024-22-341  snic2015-16-27           snic2018-8-165  snic2020-15-197  snic2021-22-571   snic2022-22-1234  snic2022-5-528    zinc22
+naiss2023-22-655          naiss2024-22-345  snic2015-16-34           snic2018-8-166  snic2020-15-198  snic2021-22-572   snic2022-22-1236  snic2022-5-530
+naiss2023-22-658          naiss2024-22-347  snic2015-1-72            snic2018-8-167  snic2020-15-199  snic2021-22-573   snic2022-22-1237  snic2022-5-544
+naiss2023-22-659          naiss2024-22-351  snic2015-1-92            snic2018-8-168  snic2020-15-2    snic2021-22-574   snic2022-22-1238  snic2022-5-548
+naiss2023-22-660          naiss2024-22-354  snic2015-6-101           snic2018-8-169  snic2020-15-20   snic2021-22-579   snic2022-22-1247  snic2022-5-552
+naiss2023-22-662          naiss2024-22-358  snic2015-6-102           snic2018-8-170  snic2020-15-201  snic2021-22-580   snic2022-22-125   snic2022-5-555
+naiss2023-22-665          naiss2024-22-362  snic2015-6-104           snic2018-8-171  snic2020-15-202  snic2021-22-582   snic2022-22-1250  snic2022-5-560
+naiss2023-22-667          naiss2024-22-363  snic2015-6-107           snic2018-8-173  snic2020-15-203  snic2021-22-583   snic2022-22-1253  snic2022-5-568
+naiss2023-22-67           naiss2024-22-375  snic2015-6-109           snic2018-8-175  snic2020-15-204  snic2021-22-584   snic2022-22-1254  snic2022-5-582
+
+
+

Types of UPPMAX projects

+
    +
  • NAISS projects
  • +
  • UPPMAX projects
  • +
  • NGI Delivery projects
  • +
  • Course projects
  • +
+

Apply to an UPPMAX project

+

See the UPPMAX page on a 'Project application' here.

\ No newline at end of file
diff --git a/getting_started/project_apply.mp3 b/getting_started/project_apply.mp3
new file mode 100644
index 000000000..bee6fa092
Binary files /dev/null and b/getting_started/project_apply.mp3 differ
diff --git a/getting_started/project_apply/index.html b/getting_started/project_apply/index.html
new file mode 100644
index 000000000..a47981042
--- /dev/null
+++ b/getting_started/project_apply/index.html
@@ -0,0 +1,3305 @@
+ Applying for a Project - UPPMAX Documentation

Project application

+

To use UPPMAX resources, one needs:

+ +

Your user account is a personal log-in to our systems. Computer resources like CPU-hours and disk storage are allocated to projects.

+

The workflow is like this:

+
- Register in SUPR
- Accept SUPR user agreement
- Become a member of a project
- Apply for an account at UPPMAX (or other resources)
+
+
+

Warning

+

Note that you can only get a user account on a resource if you belong to a project with allocations there!

+
+

SUPR account

+
+Get a SUPR account +
  • You should visit the national project management platform SUPR and register there.
      • Make sure that you don't already have an account at SUPR. You must not have more than one account in SUPR.
  • All steps here.
+
+
+Accept SUPR user agreement +
  • You must accept the user agreement in SUPR, either online or in paper form.
  • Details here.
+
+

Applying for an UPPMAX project (PI)

+

If you are a PI: apply for a project in SUPR.

+ +

Become a member of a project

+

If you are not a PI: apply for membership in a project you want to join in SUPR, then wait for the PI to accept your application. Alternatively, the PI can add you directly.

+ +
+I just got an UPPMAX project, yet I cannot login to UPPMAX? +

It typically takes minutes, or at most a few hours, before the changes propagate from SUPR to UPPMAX.

+

If you still cannot log in after one night, please contact support.

+
+
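Once the changes have propagated, a quick way to confirm the new project is to list your projects from a terminal. A minimal sketch, assuming you can already log in to an UPPMAX cluster and that the projinfo tool is available there (as it is on the UPPMAX clusters):

projinfo

This prints the projects your account is a member of, together with their core-hour usage, so you can verify that the new project has arrived.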

Apply for an account at UPPMAX

+

If you already have an account at UPPMAX, you are ready by now! Otherwise, apply for an account at UPPMAX in SUPR.

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/project_apply_bianca/index.html b/getting_started/project_apply_bianca/index.html new file mode 100644 index 000000000..11af28bd5 --- /dev/null +++ b/getting_started/project_apply_bianca/index.html @@ -0,0 +1,3165 @@ + + + + + + + + + + + + + + + + + + + Project application for Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Project application for Bianca

+

To use an UPPMAX cluster, one needs to apply to a project. +This page describes how to apply to a Bianca project.

+

Procedure

+

1. Go to https://supr.naiss.se/

+

Example SUPR NAISS main page

+
+

Example SUPR NAISS main page

+
+

2. Click on 'Rounds'

+

On the main page, click on 'Rounds'

+

On the main page, click on 'Rounds'

+
+

On the main page, click on 'Rounds'

+
+

3. Click on 'Go to NAISS SENS'

+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+
+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+
+

4. Click on 'Go to NAISS SENS Small' for the current year

+

In the 'NAISS SENS Rounds' menu, click on 'Go to NAISS SENS Small' for the +current year:

+

In the 'NAISS SENS Rounds' menu, click on 'Go to NAISS SENS Small' for the current year

+
+

In the 'NAISS SENS Rounds' menu, click on 'Go to NAISS SENS Small' for the current year

+
+

5. Click 'Create New Proposal for NAISS SENS Small' for the current year

+

In the 'Open for Proposals' screen, click 'Create New Proposal for NAISS SENS Small' for the current year

+

In the 'Open for Proposals' screen, click 'Create New Proposal for NAISS SENS Small' for the current year

+
+

In the 'Open for Proposals' screen, click 'Create New Proposal for NAISS SENS Small' for the current year

+
+

6. Add a project title and click 'Create new proposal'

+

In the 'Create New Proposal for NAISS SENS Small 2024', add a project title and click 'Create new proposal'

+

In the 'Create New Proposal for NAISS SENS Small 2024', add a project title and click 'Create new proposal'

+
+

In the 'Create New Proposal for NAISS SENS Small 2024', add a project title and click 'Create new proposal'

+
+

After this, the procedure is straightforward.

+

Resource available for a NAISS SENS Small project

+
+

Resource available for a NAISS SENS Small project

+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/project_apply_pelle/index.html b/getting_started/project_apply_pelle/index.html new file mode 100644 index 000000000..d4d2f7c6d --- /dev/null +++ b/getting_started/project_apply_pelle/index.html @@ -0,0 +1,3233 @@ + + + + + + + + + + + + + + + + + + + Project application for Pelle - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Project application for Pelle

+

To use an UPPMAX cluster, one needs to apply to a project. +This page describes how to apply to a Pelle project.

+

Procedure

+
+Prefer a video? +

See the YouTube video 'Apply for an UPPMAX Pelle project'

+
+

1. Go to https://supr.naiss.se/

+
+What does that look like? +

Example SUPR NAISS main page

+
+

Example SUPR NAISS main page

+
+
+

2. Click on 'Rounds'

+

On the main page, click on 'Rounds'

+
+What does that look like? +

On the main page, click on 'Rounds'

+
+

On the main page, click on 'Rounds'

+
+
+

3. Click on 'Go to Compute Rounds'

+

In the 'Rounds' menu, click on 'Go to Compute Rounds'

+
+What does that look like? +

In the 'Rounds' menu, click on 'Go to Compute Rounds'

+
+

In the 'Rounds' menu, click on 'Go to Compute Rounds'

+
+
+

4. Click on 'Go to Centre Local Compute'

+

In the 'Compute Rounds' menu, click on 'Go to Centre Local Compute'

+
+What does that look like? +

In the 'Compute Rounds' menu, click on 'Go to Centre Local Compute'

+
+

In the 'Compute Rounds' menu, click on 'Go to Centre Local Compute'

+
+
+

5. Click on 'Go to UPPMAX Local'

+

In the 'Centre Local Compute Rounds' menu, click on 'Go to UPPMAX Local'

+
+What does that look like? +

In the 'Centre Local Compute Rounds' menu, click on 'Go to UPPMAX Local'

+
+

In the 'Centre Local Compute Rounds' menu, click on 'Go to UPPMAX Local'

+
+
+

6. Click on 'Create new proposal'

+

In the 'UPPMAX Local' menu, click on 'Create new proposal'

+
+What does that look like? +

In the 'UPPMAX Local' menu, click on 'Create new proposal'

+
+

In the 'UPPMAX Local' menu, click on 'Create new proposal'

+
+
+

7. Fill in a title and click on 'Create new proposal'

+

In the 'Create new proposal for UPPMAX local' menu, fill in a title and click on 'Create new proposal'

+
+What does that look like? +

In the 'Create new proposal for UPPMAX local' menu, fill in a title and click on 'Create new proposal'

+
+

In the 'Create new proposal for UPPMAX local' menu, fill in a title and click on 'Create new proposal'

+
+
+

You have just created an UPPMAX local compute project!

+
+What does that look like? +

An UPPMAX local compute project

+
+

An UPPMAX local compute project

+
+
+

8. Scroll down and add Pelle

+

In your UPPMAX local compute project, scroll down to 'Resources' and add Pelle.

+
+What does that look like? +

In your UPPMAX local compute project, scroll down to 'Resources' and add Pelle

+
+

In your UPPMAX local compute project, scroll down to 'Resources' and add Pelle

+
+
+

Click on 'Add resource to proposal' to add Pelle as a resource.

+
+What does that look like? +

In your UPPMAX local compute project, click 'Add resource to proposal'

+
+

In your UPPMAX local compute project, click 'Add resource to proposal'

+
+
+

9. Set the number of core hours and click 'Add resource'

+

In the 'Add resource Pelle' menu, set the number of core hours and click 'Add resource'.

+
+What does that look like? +

In the 'Add resource Pelle' menu, set the number of core hours and click 'Add resource'

+
+

In the 'Add resource Pelle' menu, set the number of core hours and click 'Add resource'

+
+
+

10. Done

+

Now, Pelle is added to your UPPMAX local compute project. Well done!

+
+What does that look like? +

In your UPPMAX local compute project, Pelle is added

+
+

In your UPPMAX local compute project, Pelle is added

+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/project_apply_scc/index.html b/getting_started/project_apply_scc/index.html new file mode 100644 index 000000000..f506cbe4d --- /dev/null +++ b/getting_started/project_apply_scc/index.html @@ -0,0 +1,3227 @@ + + + + + + + + + + + + + + + + + + + Project application for SCC - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Project application for SCC

+

To use an UPPMAX cluster, one needs to apply to a project. +This page describes how to apply to an SCC project.

+

Procedure

+
+Prefer a video? +

...

+
+

1. Go to https://supr.naiss.se/

+
+What does that look like? +

Example SUPR NAISS main page

+
+

Example SUPR NAISS main page

+
+
+

2. Click on 'Rounds'

+

On the main page, click on 'Rounds'

+
+What does that look like? +

On the main page, click on 'Rounds'

+
+

On the main page, click on 'Rounds'

+
+
+

3. Click on 'Go to Swedish Science Cloud'

+

In the 'Rounds' menu, click on 'Go to Swedish Science Cloud'

+
+What does that look like? +

In the 'Rounds' menu, click on 'Go to Swedish Science Cloud'

+
+

In the 'Rounds' menu, click on 'Go to Swedish Science Cloud'

+
+
+

4. Click on 'Go to NAISS Small Compute 2025'

+

In the 'Cloud resource' menu, click on 'Go to NAISS Small Compute 2025'.

+
+What does that look like? +

In the 'Cloud resource' menu, click on 'Go to NAISS Small Compute 2025'

+
+

In the 'Cloud resource' menu, click on 'Go to NAISS Small Compute 2025'

+
+
+

5. Click on 'Create new proposal for NAISS Small Compute'

+

In the 'Small Compute Rounds' menu, click on 'Create new proposal for NAISS Small Compute'.

+
+What does that look like? +

In the 'Small Compute Rounds' menu, click on 'Create new proposal for NAISS Small Compute'

+
+

In the 'Small Compute Rounds' menu, click on 'Create new proposal for NAISS Small Compute'

+
+
+

6. Add a project title and click on 'Create new proposal'

+

In the 'Create new proposal' menu, add a project title and click on 'Create new proposal'

+
+What does that look like? +

In the 'Create new proposal' menu, add a project title and click on 'Create new proposal'

+
+

In the 'Create new proposal' menu, add a project title and click on 'Create new proposal'

+
+
+

7. Scroll down to 'Resources'

+

In this NAISS project proposal page, scroll down to 'Resources'.

+
+What does that look like? +

In this NAISS project proposal page, scroll down to 'Resources'

+
+

In this NAISS project proposal page, scroll down to 'Resources'

+
+
+

8. Select 'Cloud @ SCC'

+

In the 'Resources' dropdown, select 'Cloud @ SCC'.

+
+What does that look like? +

In the 'Resources' dropdown, select 'Cloud @ SCC'

+
+

In the 'Resources' dropdown, select 'Cloud @ SCC'

+
+
+

9. Set the amount of coins and click 'Add Resource'

+

At the 'Add resource Cloud' page, set the amount of coins and click 'Add Resource'.

+
+What does that look like? +

At the 'Add resource Cloud' page, set the amount of coins and click 'Add Resource'.

+
+

At the 'Add resource Cloud' page, set the amount of coins and click 'Add Resource'.

+
+
+

The resource is now added to your project.

+
+What does that look like? +

'Resource Cloud added to proposal'

+
+

'Resource Cloud added to proposal'

+
+
+

10. Click 'Submit proposal'

+

In this NAISS project proposal page, +after all other details are filled in, +scroll down and click on 'Submit proposal'

+
+What does that look like? +

In this NAISS project proposal page, scroll down and click on 'Submit proposal'

+
+

In this NAISS project proposal page, scroll down and click on 'Submit proposal'

+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/project_apply_simpler/index.html b/getting_started/project_apply_simpler/index.html new file mode 100644 index 000000000..f0b32205f --- /dev/null +++ b/getting_started/project_apply_simpler/index.html @@ -0,0 +1,3171 @@ + + + + + + + + + + + + + + + + + + + SIMPLER project application - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SIMPLER project application

+
+

SIMPLER is an abbreviation

+

SIMPLER is an abbreviation of +'Swedish Infrastructure for Medical Population-based Life-course +and Environmental Research'. +The name is not meant to indicate that this is easier.

+
+

To use an UPPMAX cluster, one needs to apply to a project. +This page describes how to apply for a SIMPLER project.

+

Procedure

+
+Prefer a video? +

View the YouTube video that shows this procedure

+
+

1. Go to https://supr.naiss.se/

+

Example SUPR NAISS main page

+
+

Example SUPR NAISS main page

+
+

2. Click on 'Rounds'

+

On the main page, click on 'Rounds'

+

On the main page, click on 'Rounds'

+
+

On the main page, click on 'Rounds'

+
+

3. Click on 'Go to NAISS SENS'

+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+
+

In the 'Rounds' menu, click on 'Go to NAISS SENS'

+
+

4. Click on 'Go to SIMPLER' for the current year

+

In the 'Rounds' menu, click on 'Go to SIMPLER' for the current year.

+

In the 'Rounds' menu, click on 'Go to SIMPLER'

+
+

In the 'Rounds' menu, click on 'Go to SIMPLER' for the current year.

+
+

5. Click 'Create New Proposal for SIMPLER' for the current year

+

In the 'Open for Proposals' screen, click 'Create New Proposal for SIMPLER' for the current year

+

In the 'Open for Proposals' screen, click 'Create New Proposal for SIMPLER' for the current year

+
+

In the 'Open for Proposals' screen, click 'Create New Proposal for SIMPLER' for the current year

+
+

6. Add a project title and click 'Create new proposal'

+

In the 'Create New Proposal for SIMPLER 2024', add a project title and click 'Create new proposal'

+

In the 'Create New Proposal for SIMPLER 2024', add a project title and click 'Create new proposal'

+
+

In the 'Create New Proposal for SIMPLER 2024', add a project title and click 'Create new proposal'

+
+

After this, the procedure is straightforward.

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/rackham_usage_prerequisites/index.html b/getting_started/rackham_usage_prerequisites/index.html new file mode 100644 index 000000000..2ccdc1388 --- /dev/null +++ b/getting_started/rackham_usage_prerequisites/index.html @@ -0,0 +1,3229 @@ + + + + + + + + + + + + + + + + + + + Prerequisites for using Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Prerequisites for using Rackham

+

To be allowed to log in to Rackham, +one needs all of these:

+ +

These prerequisites are discussed in detail below.

+

An active research project

+

One prerequisite for using Rackham +is that you need to be a member of an active SNIC +or SIMPLER research project. These can have many names, such as uppmax[number], +snic[number] or naiss[number], +where [number] represents a number, for example uppmax2021-2-1, snic2022-6-230 or naiss2023-6-382.

+
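As a rough command-line version of the same check, here is a minimal sketch, assuming you can already log in to Rackham and that project memberships show up as POSIX groups named after the patterns above (an assumption, not guaranteed for every project):

id -Gn | tr ' ' '\n' | grep -E '^(uppmax|snic|naiss)'

This lists the groups your account belongs to and keeps the ones that look like project names.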
+Forgot your Rackham projects? +

How to see your research projects is described at research projects.

+

Spoiler: go to https://supr.naiss.se

+
+

SUPR (the 'Swedish User and Project Repository') +is the website that allows one to request access to Rackham +and to get an overview of the requested resources.

+
+What does the SUPR website look like? +

First SUPR page

+
+

First SUPR page

+
+

SUPR 2FA login

+
+

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

+
+
+

After logging in, the SUPR +website will show all projects you are a member of, +under the 'Projects' tab.

+
+What does the 'Projects' tab of the SUPR website look like? +

Example overview of SUPR projects

+
+

Example overview of SUPR projects

+
+
+

To see if a project has access to Rackham, click on the +project and scroll to the 'Resources' section. In the 'Compute' subsection, +there is a table. Under 'Resource' it should state 'Rackham @ UPPMAX'.

+
+What does the 'Resources' page of an example project look like? +

The 'Resources' page of an example project

+
+

The 'Resources' page of an example project. This project has two compute +resources and two storage resources.

+
+
+

Note that the 'Accounts' tab can be useful to verify your username.

+
+How does the 'Accounts' tab help me find my username? +

An example of a SUPR 'Accounts' tab

+
+

An example of a SUPR 'Accounts' tab. +The example user has username sven-sens2023598, +which means his/her UPPMAX username is sven

+
+
+

You can become a member of an active research project by:

+
  • requesting membership to an existing project in SUPR
  • creating a project. See the UPPMAX page on how to submit a project application here
+

An UPPMAX user account

+

Another prerequisite for using Rackham +is that you must have a personal UPPMAX user account.

+

An UPPMAX password

+

Another prerequisite for using Rackham +is that you need to know your UPPMAX password. +See how to reset and set your UPPMAX password +to do so.

+

An UPPMAX 2FA

+

Another prerequisite for using Rackham (but only for the Rackham remote desktop website) is to have an UPPMAX 2FA. See how to get an UPPMAX 2FA

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/reset_uppmax_password/index.html b/getting_started/reset_uppmax_password/index.html new file mode 100644 index 000000000..f4b9ebcee --- /dev/null +++ b/getting_started/reset_uppmax_password/index.html @@ -0,0 +1,3177 @@ + + + + + + + + + + + + + + + + + + + Reset your UPPMAX password - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Reset your UPPMAX password

+
+Prefer a video? +

See the YouTube video 'How to reset your UPPMAX password'

+
+

Procedure

+

1. Go to https://suprintegration.uppmax.uu.se/getpasswd

+

Go to https://suprintegration.uppmax.uu.se/getpasswd. +After authenticating yourself, your password is reset immediately.

+

You will be sent an email in around 5 minutes.

+

2. Open email

+

Open the email and click on the link it suggests.

+
+What does that email look like? +

Your email will look similar to this:

+
Greetings,
+
+a new password has been generated for your account at UPPMAX.
+
+You can fetch it by visiting the link below.
+Note though that the link is only valid for 7 days and one (1) visit.
+
+You can retrieve the password at the following link:
+
+https://content.uppmax.uu.se/get-password2.php?sum=hvs.CAESIGOczS0[more_letters]
+
+If the password has expired, you can request a new password from our homepage
+https://www.uppmax.uu.se and the link "Lost your password?".
+
+Note that if you requested a new password because your account was locked,
+it may take some additional time (up to an hour) before that change is
+reflected everywhere.
+
+If you are unsure about what your user name is, this information is available
+in SUPR (https://supr.snic.se/) under Accounts.
+
+For general information about how to login, change your password and
+so on, please see our getting started guide at
+
+http://www.uppmax.uu.se/support/user-guides/guide--first-login-to-uppmax/
+
+regards, UPPMAX Support
+
+
+VARNING: Klicka inte på länkar och öppna inte bilagor om du inte känner igen avsändaren och vet att innehållet är säkert.
+CAUTION: Do not click on links or open attachments unless you recognise the sender and know the content is safe.
+
+

In this example, +https://content.uppmax.uu.se/get-password2.php?sum=hvs.CAESIGOczS0[more_letters] +is the link you should click

+
+

This will take you to a page with your new password.

+

3. Log in with your new password

+

At the page with your new password, you use that password to log in.

+
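For example, a minimal login from a terminal, assuming the example username sven used elsewhere in this documentation and Rackham as your cluster:

ssh sven@rackham.uppmax.uu.se

You will be prompted for the new password (and, depending on where you connect from, a two-factor code).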

4. (optional) Change your password

+

If you want to change your password, see +How to change your UPPMAX password.

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/setup_vpn/index.html b/getting_started/setup_vpn/index.html new file mode 100644 index 000000000..6f21e4f77 --- /dev/null +++ b/getting_started/setup_vpn/index.html @@ -0,0 +1,3134 @@ + + + + + + + + + + + + + + + + + + + Setup a VPN - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Setup a VPN

+

Some UPPMAX clusters require you +to have an IP address inside of SUNET. +A virtual private network (VPN) allows one to do so: it will use the +Swedish university networks to connect to the UPPMAX clusters.

+

To be able to use a VPN to get inside of SUNET:

+
    +
  • For Uppsala University: +
  • +
  • For Lund University: go to this page
  • +
  • For other Swedish universities, search their websites to get the required VPN credentials.
  • +
+
+Where do I go if I am no longer affiliated with a Swedish university? +

+ + +

+

In this case, one cannot use a VPN. What to do instead is yet unknown: please contact support.

+
+
+Want a video to see how the UU VPN is used? + +
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/setup_vpn_uu_linux/index.html b/getting_started/setup_vpn_uu_linux/index.html new file mode 100644 index 000000000..0f7411f88 --- /dev/null +++ b/getting_started/setup_vpn_uu_linux/index.html @@ -0,0 +1,3129 @@ + + + + + + + + + + + + + + + + + + + Setup a VPN from Uppsala University for Linux - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Setup a VPN from Uppsala University for Linux

+

How to set up a VPN differs between universities +and differs between operating systems. +This page describes how to set up a VPN from Uppsala University for Linux.

+

Procedure

+

Here is the procedure, as suggested by UIT:

+

Setup a VPN from Uppsala University for Linux 1

+

Setup a VPN from Uppsala University for Linux 2

+

Setup a VPN from Uppsala University for Linux 3

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/snowy_usage_prerequisites/index.html b/getting_started/snowy_usage_prerequisites/index.html new file mode 100644 index 000000000..911704968 --- /dev/null +++ b/getting_started/snowy_usage_prerequisites/index.html @@ -0,0 +1,3216 @@ + + + + + + + + + + + + + + + + + + + Prerequisites for using Snowy - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Prerequisites for using Snowy

+

To be allowed to log in to Snowy, +one needs all of these:

+ +

These prerequisites are discussed in detail below.

+

An active research project

+

One prerequisite for using Snowy +is that you need to be a member of an active SNIC +or SIMPLER research project. These can have many names, such as uppmax[number], +snic[number] or naiss[number], +where [number] represents a number, for example uppmax2021-2-1, snic2022-6-230 or naiss2023-6-382.

+
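Because Snowy is reached from Rackham rather than having its own login nodes, jobs are typically submitted with Slurm's multi-cluster flag. A minimal sketch, assuming the example project naiss2023-6-382 above and a hypothetical job script job.sh:

sbatch -M snowy -A naiss2023-6-382 job.sh

Here -M snowy directs the job to the Snowy cluster and -A names the project to charge the core hours to.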
+Forgot your Snowy projects? +

How to see your research projects is described at research projects.

+

Spoiler: go to https://supr.naiss.se

+
+

SUPR (the 'Swedish User and Project Repository') +is the website that allows one to request access to Snowy +and to get an overview of the requested resources.

+
+What does the SUPR website look like? +

First SUPR page

+
+

First SUPR page

+
+

SUPR 2FA login

+
+

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

+
+
+

After logging in, the SUPR +website will show all projects you are a member of, +under the 'Projects' tab.

+
+What does the 'Projects' tab of the SUPR website look like? +

Example overview of SUPR projects

+
+

Example overview of SUPR projects

+
+
+

To see if a project has access to Snowy, click on the +project and scroll to the 'Resources' section. In the 'Compute' subsection, +there is a table. Under 'Resource' it should state 'Snowy @ UPPMAX'.

+
+What does the 'Resources' page of an example project look like? +

The 'Resources' page of an example project

+
+

The 'Resources' page of an example project. This project has two compute +resources and two storage resources. +A Snowy project would show the word 'Snowy' somewhere, +so this is not a Snowy project.

+
+
+

Note that the 'Accounts' tab can be useful to verify your username.

+
+How does the 'Accounts' tab help me find my username? +

An example of a SUPR 'Accounts' tab

+
+

An example of a SUPR 'Accounts' tab. +The example user has username sven-sens2023598, +which means his/her UPPMAX username is sven

+
+
+

You can become a member of an active research project by:

+
  • requesting membership to an existing project in SUPR
  • creating a project. See the UPPMAX page on how to submit a project application here
+

An UPPMAX user account

+

Another prerequisite for using Snowy +is that you must have a personal UPPMAX user account.

+

An UPPMAX password

+

Another prerequisite for using Snowy +is that you need to know your UPPMAX password. +If you change it, it may take up to an hour before changes are reflected in Snowy.

+

For advice on handling sensitive personal data correctly on Snowy, see our FAQ page.

+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/storage/index.html b/getting_started/storage/index.html new file mode 100644 index 000000000..6a717f383 --- /dev/null +++ b/getting_started/storage/index.html @@ -0,0 +1,3122 @@ + + + + + + + + + + + + + + + + + + + Data storage - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Data storage

+ +
  • UU guide on data storage https://www.uu.se/en/staff/gateway/research/research-handbook/research-data/store-data-and-cooperate (broken link)
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/supr/index.html b/getting_started/supr/index.html new file mode 100644 index 000000000..297787935 --- /dev/null +++ b/getting_started/supr/index.html @@ -0,0 +1,3152 @@ + + + + + + + + + + + + + + + + + + + SUPR - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

SUPR

+

SUPR ('Swedish User and Project Repository') is a website +at https://supr.naiss.se/ that +manages HPC accounts and projects.

+

Apply for an UPPMAX project

+

See the UPPMAX page on how to apply for an UPPMAX project.

+

Setting up an UPPMAX 2FA token

+

Go to https://suprintegration.uppmax.uu.se/bootstrapotp/ +to initiate the token +creation. This should take you to a landing page with some initial +information and let you know that you will be sent to SUPR to log in.

+
+What does that look like? +

SUPR request 2FA

+
+

Once you click "Continue", you'll be sent to SUPR where you should log in. +Once you've done so SUPR will let you know that you will be sent back +and the identity you are logged in with.

+
+For staff only +

SUPR API documentation, +requires the same certificate as RT

+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/supr_register/index.html b/getting_started/supr_register/index.html new file mode 100644 index 000000000..b2556f099 --- /dev/null +++ b/getting_started/supr_register/index.html @@ -0,0 +1,3150 @@ + + + + + + + + + + + + + + + + + + + Register at SUPR - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Register at SUPR

+
  • Go to https://supr.naiss.se/person/register/
  • If you already have an account you must use that account; otherwise you can register with or without SWAMID by clicking on the appropriate button.
+

register new person

+
  • If you clicked on register via SWAMID you will have to choose the university that you belong to. Here, as an example, Uppsala University is chosen:
+

Find institution

+
  • Click on the University and then choose if you want to log in via this SWAMID once,
  • or if your browser should remember your choice and use it every time you visit SUPR.
  • Here again, as an example, is Uppsala University:
+

SUPR login

+
  • You can now check the information your university has sent to SUPR and accept it to create a SUPR account.
  • If you instead click on Register without SWAMID you will have to fill in this:
+

Registration form

+
  • Fill in the form and click the button to create your SUPR account.
  • You have to confirm your account by answering an email sent to the address you registered.
+

Accept the User Agreement

+
  • After logging into your SUPR account you must accept the user agreement. Click on Handle User Agreement.
+

User agreement

+
  • Depending on how you take care of the User Agreement, it may be approved automatically or it may require manual checking (for example if you choose to use the paper form). You will get an email from SUPR when it has been approved.
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/user_account/index.html b/getting_started/user_account/index.html new file mode 100644 index 000000000..218b6709f --- /dev/null +++ b/getting_started/user_account/index.html @@ -0,0 +1,3173 @@ + + + + + + + + + + + + + + + + + + + UPPMAX User Accounts - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

UPPMAX User Accounts

+

An UPPMAX user account is needed to use UPPMAX resources (together with an active UPPMAX research project) and allows you to log in to the UPPMAX clusters.

+

Apply to an UPPMAX user account

+

To apply for an UPPMAX user account, you (the user) +and the PI of the project (the researcher +in charge of the research project) must complete the following steps:

+
  • You should visit the national project management platform SUPR and register there. Instructions here.
  • Make sure that you don't already have an account at SUPR. You must not have more than one account in SUPR.
  • You must accept the user agreement in SUPR, either online or in paper form. Details here.
  • Become a member of a project:
      • If you are a PI: apply for a project in SUPR. Details here.
      • If you are not a PI: apply for membership in a project you want to join in SUPR, then wait for the PI to accept your application. Alternatively, the PI can add you directly. Join an existing project.
  • You must apply for an account at UPPMAX in SUPR.
+

Apply for an account at UPPMAX

+
  • When the PI has accepted your membership application, you will receive an email.
  • Log in to SUPR and click on Accounts in the list to the left.
  • You will see the login accounts you already have at other NAISS centres, if you have any. Under the "Possible Resource Account Requests" heading you find the UPPMAX resources you can apply for a login account on. Just use the "Request Account on UPPMAX" button.
+

Accounts

+
  • You can then request a username. Then click Request Account.
+

Accounts

+
  • After applying it might take up to 2 working days before you receive 2 emails with information on how to log in to UPPMAX.
  • If you have any questions, please contact us through the Support Form on how to access the UPPMAX resources.
+
+

Note

+

After the 4 steps are completed, your account will be created at UPPMAX within 2 working days and you will receive two emails with information.

+
+ + + + + + + + + + \ No newline at end of file diff --git a/getting_started/why_2fa/index.html b/getting_started/why_2fa/index.html new file mode 100644 index 000000000..36374ed2b --- /dev/null +++ b/getting_started/why_2fa/index.html @@ -0,0 +1,3199 @@ + + + + + + + + + + + + + + + + + + + Why is 2FA important? - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Why is 2FA important?

+

By requiring a second factor, typically a cell phone or other physical device, it becomes much harder for an attacker to gain access to your account if they have somehow gotten hold of your password (which in this case is the first factor).

+

For security reasons you will have to use a two factor authentication system +if you are a) connecting to UPPMAX from outside of Sweden, or b) connecting +from a network within Sweden that does not support forward and reverse DNS +lookups (due to a misconfiguration in your network, you can ask your +internet service provider about this). More information about why can be +found below.

+

From outside Sweden

+

If you try to connect directly to our resources from computers outside Sweden +you will most likely be required to set up and use two factor +authentication (you will be asked for a code from your second factor +automatically if required).

+

Another alternative, if you need to access UPPMAX from outside Sweden, +may be to use a Swedish VPN service. +For example, if you're employed at Uppsala University, +then you can connect using the university's VPN service.

+

From within Sweden

+

If you are required to use two factor authentication, +and are connecting from a computer in Sweden, this is typically caused by +your computer not having a proper DNS name, or the forward and reverse name +resolution do not match.

+
+Why is that important? +

See here

+
+

If this is the case, please contact your ISP and ask them to correct this.

+

Note

+

You can check forward and reverse name resolution on this webpage:

+ +

To see what address the other side thinks you come from (which will likely be what our systems see), services like

+ +

can be helpful.

+

On Linux, you can also use these commands:

+
  • Forward resolution: host mycomputername.domain.tld. You have to replace mycomputername.domain.tld with your computer's actual name, for example:
+
host rackham2.uppmax.uu.se
+
+

will give:

+
rackham2.uppmax.uu.se has address 89.44.250.83
+
+
  • Reverse resolution: host my_ipnumber. You have to replace my_ipnumber with your computer's actual IP number, for example:
+
host 89.44.250.83
+
+

which should give something similar to:

+
89.44.250.83.in-addr.arpa domain name pointer tintin1.uppmax.uu.se
+
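To check both directions in one go, here is a minimal round-trip sketch, assuming the dig tool is installed and using the third-party service api.ipify.org to discover your public IP:

ip=$(curl -s https://api.ipify.org)          # your public IP, as seen from outside
name=$(dig +short -x "$ip" | sed 's/\.$//')  # reverse lookup: IP -> name
dig +short "$name"                           # forward lookup: name -> IP

If the last command does not print the IP you started from, forward and reverse resolution do not match.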
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/clusters/bianca/index.html b/hardware/clusters/bianca/index.html new file mode 100644 index 000000000..219e67e09 --- /dev/null +++ b/hardware/clusters/bianca/index.html @@ -0,0 +1,3265 @@ + + + + + + + + + + + + + + + + + + + Bianca hardware - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Bianca hardware

+

Technical Summary

+
  • 204 compute nodes with single or dual CPUs and one 4TB mechanical drive or 1TB SSD
  • Each CPU has 8 cores
  • 75 compute nodes, 256 GB memory each
  • 15 compute nodes, 512 GB memory each
  • 10 compute nodes, each equipped with 2x NVIDIA A100 (40GB) GPUs
  • Total number of CPU cores is 4800
  • Login nodes have 2 vCPUs each and 16GB memory
  • Dual 10 Gigabit Ethernet for all nodes
+

Parameters

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Bianca
Purpose | Sensitive data
Reserved for | NAISS-SENS projects
Nodes (Intel) | 272 + 4 nodes with 2 NVIDIA A100 GPUs each
Cores per node | 16/64
Memory per node | 128GB
Fat nodes | 256 & 512GB
Local disk (scratch) | 4TB
Network | Dual 10Gbit/s
Operating System | CentOS 7
Login nodes | Yes (2 cores and 15 GB)
"Home" storage | Castor
"Project" Storage | Castor
+

CPU

+

GPU

+

Network

+

Storage

+

Security

+

Since Bianca is designed to handle sensitive personal data, security is a key aspect of the configuration. In order to ensure that the data is safe, we have implemented a series of security measures including, but not limited to:

+
  • One virtualized cluster per project; no resources are shared between projects.
  • Separate storage volumes per project.
  • Detailed logging of file transfers in and out of the cluster.
  • Two-factor authentication.
  • No internet access inside the clusters.
  • Locked racks for the hardware.
  • Destruction of broken hard drives.
+

Uppsala University has decided on the following KRT classifications for Bianca:

+
  • 321 for project directories
  • 322 for home directories
+ + + + + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/clusters/miarka/index.html b/hardware/clusters/miarka/index.html new file mode 100644 index 000000000..0a5aa7dfa --- /dev/null +++ b/hardware/clusters/miarka/index.html @@ -0,0 +1,3096 @@ + + + + + + + + + + + + + + + + + + + Miarka - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Miarka

+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/clusters/rackham/index.html b/hardware/clusters/rackham/index.html new file mode 100644 index 000000000..42f9de505 --- /dev/null +++ b/hardware/clusters/rackham/index.html @@ -0,0 +1,3199 @@ + + + + + + + + + + + + + + + + + + + Rackham hardware - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Rackham hardware

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Nodes | CPUs | Cores | Memory | Scratch | GPUs | Name | Comment
272 | 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) | 20 (2 x 10) | 128GB | 3/4TB | N/A | r33-r304 |
32 | 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) | 20 (2 x 10) | 256GB | 3/4TB | N/A | r1-r32 |
4 | 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) | 20 (2 x 10) | 1TB | 3/4TB | N/A | ? |
4 | 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) | 20 (2 x 10) | 256GB | 3/4TB | Nvidia Quadro K2200 | rackham1-rackham3 | Login nodes
+

The Rackham cluster was introduced in February 2017. Rackham is a NAISS resource and is estimated to be in production until the first of January 2023. The major features of Rackham and its storage system Crex are found below. For more technical data, please see the end of this article.

+

CPU

+

Network

+

Storage

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/clusters/snowy/index.html b/hardware/clusters/snowy/index.html new file mode 100644 index 000000000..7c01dbaaa --- /dev/null +++ b/hardware/clusters/snowy/index.html @@ -0,0 +1,3225 @@ + + + + + + + + + + + + + + + + + + + Snowy hardware - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Snowy hardware

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Nodes | CPUs | Cores | Memory | Scratch | GPUs | Name | Comment
122 | 2x Xeon E5-2660 2.2 GHz | 16 (2 x 8) | 128GB | 3/4TB | N/A | s1-s12, s14-s40, s42-s120, s201-s204 |
49 | 2x Xeon E5-2660 2.2 GHz | 16 (2 x 8) | 128GB | 3/4TB | Tesla T4 | s151-s163, s164-s200 |
15 | 2x Xeon E5-2660 2.2 GHz | 16 (2 x 8) | 512GB | 3/4TB | N/A | s121-s129, s131, s133-s137 |
12 | 2x Xeon E5-2660 2.2 GHz | 16 (2 x 8) | 256GB | 3/4TB | N/A | s139-s150 |
1 | 2x Xeon E5-2660 2.2 GHz | 80 (10 x 8) | 4TB | 3/4TB | N/A | s229 |
1 | 2x Xeon E5-2660 2.2 GHz | 16 (2 x 8) | 256GB | 3/4TB | Tesla T4 | s138 |
+
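The Tesla T4 nodes are requested through Slurm's generic-resource mechanism. A minimal sketch, assuming the generic resource is simply named gpu (the exact gres string on Snowy may differ) and reusing the example project naiss2023-6-382 and a hypothetical job script job.sh:

sbatch -M snowy -A naiss2023-6-382 --gres=gpu:1 job.sh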

CPU

+

GPU

+

Network

+

Storage

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/clusters/uppmax_cloud/index.html b/hardware/clusters/uppmax_cloud/index.html new file mode 100644 index 000000000..afb98b2c4 --- /dev/null +++ b/hardware/clusters/uppmax_cloud/index.html @@ -0,0 +1,3096 @@ + + + + + + + + + + + + + + + + + + + Uppmax cloud - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Uppmax cloud

+ + + + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/overview/index.html b/hardware/overview/index.html new file mode 100644 index 000000000..7292bdcbd --- /dev/null +++ b/hardware/overview/index.html @@ -0,0 +1,3208 @@ + + + + + + + + + + + + + + + + + + + Hardware overview - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Hardware overview

+

This page describes the hardware architecture of the different compute clusters +at UPPMAX as well as their storage systems.

+

UPPMAX is part of the National Academic Infrastructure for +Supercomputing in Sweden (NAISS).

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Rackham | Snowy | Bianca | UPPMAX Cloud
Purpose | General-purpose | General-purpose | Sensitive data | IaaS
Reserved for | NAISS projects | Uppsala researchers and course projects | See Bianca | NAISS and local projects
Nodes (Intel) | 486+144 | 228 + 50 Nvidia T4 GPUs | See Bianca | 40 + 20 A2 and 4 T4 Nvidia GPUs
Cores per node | 20/16 | 16 | See Bianca | 16
Memory per node | 128GB | 128GB | See Bianca | 128/256GB
Fat nodes | 256GB & 1TB | 256, 512 GB & 4TB | See Bianca | N/A
Local disk (scratch) | 2/3TB | 4TB | See Bianca | N/A
Network | InfiniBand FDR 56Gbit/s | InfiniBand FDR 40Gbit/s | See Bianca | 10GbE
Operating System | CentOS 7 | CentOS 7 | See Bianca | Linux cloud image
Login nodes | Yes | No (reached from Rackham) | See Bianca | N/A
"Home" storage | Domus | Domus | See Bianca | N/A
"Project" Storage | Crex, Lutra | Crex, Lutra | See Bianca | N/A
+

The storage systems we have provide a total volume of about 20 PB, the equivalent of nearly 15 billion 3.5-inch floppy disks or 40,000 years of 128 kbit/s encoded music.
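As a sanity check of the music comparison (assuming 128 kbit/s encoded audio and a 365-day year):

echo '20 * 10^15 * 8 / 128000 / (3600 * 24 * 365)' | bc
# prints 39637, i.e. roughly 40,000 years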

+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/castor/index.html b/hardware/storage/castor/index.html new file mode 100644 index 000000000..a8942bf0a --- /dev/null +++ b/hardware/storage/castor/index.html @@ -0,0 +1,3110 @@ + + + + + + + + + + + + + + + + + + + Castor - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Castor

+

UPPMAX has many storage systems. +This page describes the Castor storage system.

+

Castor is a custom built storage system running GlusterFS dedicated to Bianca. The system consists of 54 Huawei 5288 V3 servers; each server is equipped with 36 x 3TB SATA disks working as one logical volume (with redundancy) and providing 109TB of raw disk space per server. This gives about 5.7 PB of raw disk space in total. Each storage server is connected to the network with 2 x 40 Gbit/s Ethernet links working as one aggregated link at 80 Gbit/s.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/crex/index.html b/hardware/storage/crex/index.html new file mode 100644 index 000000000..c695183b8 --- /dev/null +++ b/hardware/storage/crex/index.html @@ -0,0 +1,3109 @@ + + + + + + + + + + + + + + + + + + + Crex - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Crex

+

UPPMAX has many storage systems. +This page describes the Crex storage system.

+

Rackham and Snowy's storage system is a DDN (DataDirect Networks) EXAScaler +filesystem based on the ES14KX platform. Crex uses 840 10TB NL-SAS drives and +24 300GB SAS drives for metadata storage. The total volume is 6 PB, with 1 PB +reserved for SciLifeLab, 4.5 PB reserved for SNIC projects, and 0.5 PB for +UPPMAX use. The filesystem is Lustre, a highly scalable filesystem common in +HPC.
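On a Lustre filesystem such as Crex, per-project usage and quota can be inspected with Lustre's own lfs tool. A minimal sketch, assuming project directories live under /proj and that each project maps to a group (reusing the example project name naiss2023-6-382 from elsewhere in this documentation):

lfs quota -h -g naiss2023-6-382 /proj

This reports the disk usage and limits for that group (project) on the mount point, in human-readable units.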

+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/cygnus/index.html b/hardware/storage/cygnus/index.html new file mode 100644 index 000000000..86462f659 --- /dev/null +++ b/hardware/storage/cygnus/index.html @@ -0,0 +1,3105 @@ + + + + + + + + + + + + + + + + + + + Cygnus - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Cygnus

+

UPPMAX has many storage systems. +This page describes the Cygnus storage system.

+

Cygnus is a DDN Secure Lustre file system for +Bianca.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/domus/index.html b/hardware/storage/domus/index.html new file mode 100644 index 000000000..865352b46 --- /dev/null +++ b/hardware/storage/domus/index.html @@ -0,0 +1,3106 @@ + + + + + + + + + + + + + + + + + + + Domus - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Domus

+

UPPMAX has many storage systems. +This page describes the Domus storage system.

+

Domus hosts the home directories and some common system directories, e.g. the +software catalogue. The system is a NetApp totalling 100 TB on 96 SAS 10K +disks, supports snapshots, and has off-site backup.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/lutra/index.html b/hardware/storage/lutra/index.html new file mode 100644 index 000000000..a78f3492d --- /dev/null +++ b/hardware/storage/lutra/index.html @@ -0,0 +1,3115 @@ + + + + + + + + + + + + + + + + + + + Lutra - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Lutra

+

UPPMAX has many storage systems. +This page describes the Lutra storage system.

+

Lutra is a custom built storage system running GlusterFS. The system consists of 6 Huawei 5288 V5 servers with a total of 6x38 10TB SATA drives for a capacity of 2.2 PB. The usable disk space is 1.8PB. Lutra is meant for "offload" or archive storage and is available to all users at a cost of (at this moment) 500 SEK/TB/year, for a commitment of four years and a minimum of 50TB. The design and filesystem choice make Lutra very scalable and cost efficient while retaining moderate read/write performance. Lutra is connected to Rackham and Snowy for general availability.

+
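As a worked example of the minimum commitment, at the current price:

echo '50 * 500 * 4' | bc
# 50 TB x 500 SEK/TB/year x 4 years = 100000 SEK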

If you are interested in this type of storage please +contact support.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/spirula/index.html b/hardware/storage/spirula/index.html new file mode 100644 index 000000000..01627169c --- /dev/null +++ b/hardware/storage/spirula/index.html @@ -0,0 +1,3105 @@ + + + + + + + + + + + + + + + + + + + Spirula - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Spirula

+

UPPMAX has many storage systems. +This page describes the Spirula storage system.

+

The DDLS-funded SciLifeLab FAIR Data Storage system, Spirula, runs Ceph-based object storage.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/hardware/storage/vulpes/index.html b/hardware/storage/vulpes/index.html new file mode 100644 index 000000000..dfce67b30 --- /dev/null +++ b/hardware/storage/vulpes/index.html @@ -0,0 +1,3104 @@ + + + + + + + + + + + + + + + + + + + Vulpes - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Vulpes

+

UPPMAX has many storage systems. +This page describes the Vulpes storage system.

+

Vulpes provides storage for Miarka.

+ + + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/img/Bianca-transfer.png b/img/Bianca-transfer.png new file mode 100644 index 000000000..5b6d3b7cf Binary files /dev/null and b/img/Bianca-transfer.png differ diff --git a/img/c_557912-l_1-k_cram_compression.png b/img/c_557912-l_1-k_cram_compression.png new file mode 100644 index 000000000..0989d34ae Binary files /dev/null and b/img/c_557912-l_1-k_cram_compression.png differ diff --git a/img/dds-cli.png b/img/dds-cli.png new file mode 100644 index 000000000..12b5beb47 Binary files /dev/null and b/img/dds-cli.png differ diff --git a/img/filezilla-snapshot.png b/img/filezilla-snapshot.png new file mode 100644 index 000000000..6ce5ccc86 Binary files /dev/null and b/img/filezilla-snapshot.png differ diff --git a/img/mac_utf8.png b/img/mac_utf8.png new file mode 100644 index 000000000..02ddec43f Binary files /dev/null and b/img/mac_utf8.png differ diff --git a/img/unused/worst.jpg b/img/unused/worst.jpg new file mode 100644 index 000000000..148f08f20 Binary files /dev/null and b/img/unused/worst.jpg differ diff --git a/img/winscp-snaphot.png b/img/winscp-snaphot.png new file mode 100644 index 000000000..063cb3715 Binary files /dev/null and b/img/winscp-snaphot.png differ diff --git a/img/winscp-snaphot1.png b/img/winscp-snaphot1.png new file mode 100644 index 000000000..d03e12657 Binary files /dev/null and b/img/winscp-snaphot1.png differ diff --git a/index.html b/index.html new file mode 100644 index 000000000..a86c6dc4c --- /dev/null +++ b/index.html @@ -0,0 +1,3112 @@ + + + + + + + + + + + + + + + + + + + + + UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + + + + + + +
+ +
+ +

+



+

drawing

+


+

Welcome to the UPPMAX documentation

+


+

Get support +Give anonymous feedback

+

Apply for a project

+

Getting started + Software + Reset your UPPMAX password + Migration to Dardel

+

Go to the UPPMAX main page

+



+

+ + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ +
+
+ + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/hpc2n/index.html b/naiss/hpc2n/index.html new file mode 100644 index 000000000..f8413717a --- /dev/null +++ b/naiss/hpc2n/index.html @@ -0,0 +1,3114 @@ + + + + + + + + + + + + + + + + + + + HPC2N - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

HPC2N

+
+

Info

+

This page is here temporarily, until its content is moved +to a better place.

+
+

NAISS has many HPC centers. +HPC2N is one of those.

+ + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/index.html b/naiss/index.html new file mode 100644 index 000000000..c3680b6d7 --- /dev/null +++ b/naiss/index.html @@ -0,0 +1,3107 @@ + + + + + + + + + + + + + + + + + + + NAISS - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

NAISS

+
+

Info

+

This page is here temporarily, until its content is moved +to a better place.

+
+

NAISS, the National Academic Infrastructure for Supercomputing in Sweden, provides national large-scale computing and storage resources for academic research.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/login_cosmos/index.html b/naiss/login_cosmos/index.html new file mode 100644 index 000000000..7ce21615a --- /dev/null +++ b/naiss/login_cosmos/index.html @@ -0,0 +1,3128 @@ + + + + + + + + + + + + + + + + + + + Login COSMOS - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/login_kebnekaise/index.html b/naiss/login_kebnekaise/index.html new file mode 100644 index 000000000..28e65f363 --- /dev/null +++ b/naiss/login_kebnekaise/index.html @@ -0,0 +1,3129 @@ + + + + + + + + + + + + + + + + + + + Login Kebnekaise - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/login_tetralith/index.html b/naiss/login_tetralith/index.html new file mode 100644 index 000000000..285fa56bc --- /dev/null +++ b/naiss/login_tetralith/index.html @@ -0,0 +1,3128 @@ + + + + + + + + + + + + + + + + + + + Login Kebnekaise - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/lunarc/index.html b/naiss/lunarc/index.html new file mode 100644 index 000000000..a1572a4b3 --- /dev/null +++ b/naiss/lunarc/index.html @@ -0,0 +1,3119 @@ + + + + + + + + + + + + + + + + + + + LUNARC - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/request_tracker/index.html b/naiss/request_tracker/index.html new file mode 100644 index 000000000..58d07e437 --- /dev/null +++ b/naiss/request_tracker/index.html @@ -0,0 +1,3154 @@ + + + + + + + + + + + + + + + + + + + Request Tracker - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Request Tracker

+
+

Info

+

This page is here temporarily, until its content is moved +to a better place.

+
+

Request Tracker, commonly abbreviated to 'RT', is the software +used by the NAISS ticket system.

+

Workflow

+

As presented

+

As presented by Henric Zazzi on 2024-10-03 at the NAISS All-Hands:

+
flowchart TD
+  new_ticket[New ticket]
+  owned_ticket[Owned ticket]
+  stalled_ticket[Stalled ticket]
+  resolved_ticket[Resolved ticket]
+
+  new_ticket --> |time and knowledge| owned_ticket
+  owned_ticket --> |when solution has been sent| resolved_ticket
+  owned_ticket --> |When ticket cannot be solved yet| stalled_ticket
+  stalled_ticket --> |When ticket can be solved| owned_ticket
+

Alternative

+

As discussed at the whiteboard session:

+
flowchart TD
+  new_ticket[New ticket]
+  owned_ticket[Owned ticket]
+  stalled_ticket[Stalled ticket]
+  resolved_ticket[Resolved ticket]
+
+  new_ticket --> |time and knowledge| owned_ticket
+  owned_ticket --> |When ticket cannot be solved yet| stalled_ticket
+  owned_ticket --> |When user has not confirmed the ticket is solved yet| stalled_ticket
+  stalled_ticket --> |When ticket can be solved| owned_ticket
+  stalled_ticket --> |When the user has confirmed the ticket has been solved| resolved_ticket
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/naiss/swestore/index.html b/naiss/swestore/index.html new file mode 100644 index 000000000..18cc5e589 --- /dev/null +++ b/naiss/swestore/index.html @@ -0,0 +1,3122 @@ + + + + + + + + + + + + + + + + + + + Swestore - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Swestore

+

Swestore is a NAISS research data storage infrastructure hosting the storage system called 'dCache'.

+

This is the information from SUPR:

+
dCache is a resource at Swestore. The total capacity allocated in this round is 100 TiB. The round upper limit is 10 TiB.
+
+Swestore is a Research Data Storage Infrastructure, intended for active research data and operated by the National Academic Infrastructure for Supercomputing in Sweden, NAISS,
+
+The storage resources provided by Swestore are made available for free for academic research funded by VR and Formas through open calls such that the best Swedish research is supported and new research is facilitated.
+
+The purpose of Swestore allocations, granted by National Allocations Committee (NAC), is to provide large scale data storage for “live” or “working” research data, also known as active research data.
+See the documentation at: https://docs.swestore.se
+
+
+

Times are changing

+
+

The following information appears in +application rounds and in decision mails from the first of January 2025:

+
Please note: NAISS can currently only approve storage on dCache at Swestore until 2026-01-01. Storage solutions for non-hot data, such as Swestore, is being investigated in accelerated form by NAISS very early 2025, and we hope to communicate the plan for long-term services before the large allocation rounds in spring 2025 are opened.
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/search/search_index.json b/search/search_index.json new file mode 100644 index 000000000..90c2d9231 --- /dev/null +++ b/search/search_index.json @@ -0,0 +1 @@ +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Home","text":"

"},{"location":"#welcome-to-the-uppmax-documentation","title":"Welcome to the UPPMAX documentation","text":"

Get support Give anonymous feedback

Apply for a project

Getting started Software Reset your UPPMAX password Migration to Dardel

Go to the UPPMAX main page

"},{"location":"backup/","title":"Backup","text":""},{"location":"support/","title":"UPPMAX support","text":"

If you lost your UPPMAX password, see how to reset your UPPMAX password.

If you need other help using UPPMAX, please contact us preferably through the Support Form. If that does not work, use support@uppmax.uu.se.

If you want to contribute, see how to contribute.

If you need general Uppsala University IT support, use the Uppsala University IT Servicedesk.

If you plan a course using UPPMAX resources, see What should I think about when planning a course using UPPMAX resources

"},{"location":"cluster_guides/arrhenius/","title":"Arrhenius","text":"

Arrhenius is a future NAISS cluster and likely the successor of Bianca, Rackham, and Tetralith.

Some of its features, as presented at the NAISS User Meeting of 2024-10-01 (these are likely to change):

  • Around 40 PFlops
  • Has CPUs and GPUs
  • Allows for regular and sensitive data
  • Allows for cloud services
  • Allows for AI
  • Storage about 27 PB for regular data, 10 PB for sensitive data
  • 65% will be owned by Sweden (the numbers above show the values for the complete cluster)
"},{"location":"cluster_guides/backup/","title":"How does backup at UPPMAX work?","text":"

Backup of data is especially important to data-driven science. This page provides the ins and outs of how backup works on UPPMAX storage systems.

As PI, you and your academic institution are ultimately responsible for your data. We recommend you maintain a primary copy of your data on a system you control, when possible. At the very least, double-check that your collaborators are taking care of your data in a responsible way.

While UPPMAX systems may have backup, these are not designed to act as the sole repository of primary data, e.g. raw data or originals.

"},{"location":"cluster_guides/backup/#what-does-backup-mean-for-my-data","title":"What does \"backup\" mean for my data?","text":"

The type of backup that is generally available for project storage at UPPMAX is incremental backup with 30-day retention. This means that any file that was deleted more than 30 days ago is irretrievably gone. Changes in a file are kept for 30 days, so we can potentially retrieve an old version up to a month after you edited it.

The backup service tries to back up all changes as often as they occur, but rapid changes will not register. Due to the large number of files in the file systems, a single backup session may take upwards of a week or more. This means that if you create a file and delete it the next day, it will probably not be backed up.

Backups are sent off-site to either KTH or LiU, depending on the storage system.

To ensure timely backups, it is very important to reduce the workload of the backup system as much as possible. Create directories with \"nobackup\" in their name or use the pre-existing nobackup directory in /proj/XYZ to store data that does not need backup.

  • It is especially important that temporary files and files that are changed often are placed in nobackup directories.
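As a minimal sketch, moving frequently-changing scratch data out of backup could look like this (the paths are placeholders; /proj/XYZ stands for your own project folder):
mkdir -p /proj/XYZ/nobackup/scratch\nmv /proj/XYZ/scratch_data /proj/XYZ/nobackup/scratch/\n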
"},{"location":"cluster_guides/backup/#which-directories-are-backed-up","title":"Which directories are backed up?","text":"

Backup is done on:

  • Home directories (on Rackham these also have snapshots)
  • All of Bianca (projects named sensYYYYXXX), except in folders named \"nobackup\"
  • SciLifeLab Storage projects (named sllstoreYYYYXXX), except in folders named \"nobackup\"
  • UPPMAX Storage projects (uppstore20YYXXX) except in folders named \"nobackup\"
  • UPPMAX Offload storage projects (uppoff20YYXXX)
  • SNIC projects (named snicYYYY-X-ZZZZ)
"},{"location":"cluster_guides/backup/#what-should-i-put-in-directories-with-backup","title":"What should I put in directories with backup?","text":"
  • In short, irreplaceable data should be placed there. This includes especially raw sequencing data and any other data that cannot be recreated by any effort. Scripts and other files that are needed to reproduce or repeat the analyses should also be placed on backup.
"},{"location":"cluster_guides/backup/#what-should-i-not-put-in-directories-with-backup","title":"What should I not put in directories with backup?","text":"
  • Directories where you are actively working, especially if you are creating or modifying many files. The backup mechanisms cannot keep up with large amounts of files changing on a rapid basis.
"},{"location":"cluster_guides/backup/#how-robust-is-uppmax-storage","title":"How robust is uppmax storage?","text":"
  • All UPPMAX storage systems use RAID technology to make storage more robust through redundancy.
  • This means that two or more disks must fail in the same \"RAID volume\" before there is a risk of data loss.

  • However, this technology does not protect against user error (e.g. \"rm -rf *\" in your project directory) or in case of a significant disaster (e.g. fire in the computer hall).

  • Off-site backup is crucial.
"},{"location":"cluster_guides/backup/#how-can-i-access-my-backups","title":"How can I access my backups?","text":"

You must contact UPPMAX support and ask for help. Provide as much information as possible, especially directory and file names.

"},{"location":"cluster_guides/backup/#what-about-snapshots","title":"What about \"snapshots\"?","text":"
  • In addition to the regular backup service, the home directories on Rackham have a feature called \"snapshots\".
  • A snapshot is a frozen \"picture\" of a file structure as it looked at the time the snapshot was taken.
  • This allows you to restore a particular file as it was at some time point.
  • Snapshots reside on the same storage system as the original data: if the storage system fails catastrophically, the snapshots are gone as well.
  • Snapshots are taken on a regular basis and are only available for home directories.

  • You can easily access snapshots in every directory by 'ls .snapshot' or 'cd .snapshot' in a terminal. The '.snapshot' is a hidden directory.
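As a sketch of restoring a file from a snapshot (the snapshot name daily.2024-12-01 and the file name are placeholders; list the .snapshot directory to see what is actually there):
ls ~/.snapshot\ncp ~/.snapshot/daily.2024-12-01/my_script.sh ~/my_script.sh\n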

"},{"location":"cluster_guides/bianca/","title":"Bianca","text":"

Bianca is one of the UPPMAX clusters, suitable for working with sensitive data.

In the near future, Bianca will be replaced by Maja.

  • Bianca's name
  • Bianca's design
  • Log in
  • Submitting jobs, using Slurm
  • Starting an interactive node
  • File transfer
    • File transfer using rsync (recommended)
    • File transfer using FileZilla (easiest)
  • The module system
  • IDEs
    • Jupyter
    • RStudio
    • VSCode
    • VSCodium
  • Courses and workshops
  • NAISS-sens
  • Best practices
    • Git on Bianca
  • Bianca installation guides
","tags":["Bianca","cluster","sensitive data"]},{"location":"cluster_guides/bianca_login_node/","title":"Bianca login node","text":"

Up to 7 login nodes run on a physical Bianca node.

","tags":["Bianca","login node","login","node"]},{"location":"cluster_guides/bianca_modules/","title":"Working with environment modules on Bianca","text":"

Bianca is a shared Linux computer with all the standard Linux tools installed, on which all users should be able to do their work independently and undisturbed.

Because this is the same for nearly all UPPMAX clusters, there is a general page on modules here.
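As a minimal sketch of a module session (the module names are examples; check module avail for what is actually installed):
module avail\nmodule load bioinfo-tools samtools\nmodule list\n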

","tags":["modules","Bianca"]},{"location":"cluster_guides/bianca_overview/","title":"Bianca overview","text":"

Bianca is an HPC cluster designed for working with sensitive data, named after a Tintin character and maintained by UPPMAX.

What is an HPC cluster?

What an HPC cluster is, is described here.

What is the design of Bianca?

How Bianca is designed, is described here.

What is UPPMAX?

UPPMAX is described here.

"},{"location":"cluster_guides/biancas_design/","title":"Bianca's design","text":"

Bianca is a high-performance computing (HPC) cluster for sensitive data.

What is an HPC cluster for sensitive data?

What an HPC cluster for sensitive data is, is described here.

Or: Bianca is a group of computers that can effectively run many calculations, as requested by multiple people, at the same time. As the data is sensitive, it is protected to remain only on Bianca.

Bianca is designed to:

  • make accidental data leaks difficult
  • make correct data management as easy as possible
  • emulate the HPC cluster environment that SNIC users were familiar with
  • provide a maximum amount of resources
  • satisfy regulations
The Bianca architecture

Bianca's architecture. Red shows the university networks. Blue shows the whole cluster, with hundreds of nodes. Green shows virtual project clusters. Yellow shows where file transfer occurs.

Bianca's architecture reflects that she is an HPC cluster for sensitive data: the whole Bianca cluster has hundreds of virtual project clusters, each of which is isolated from the others and the Internet. File transfer is only possible through the so-called 'wharf', which is a special file area that is visible from the Internet.

Bianca has no internet

  • You can log in, but with extra steps
  • You can transfer files, but with extra steps
  • We recommend using the remote desktop login, see here

As Bianca is an HPC cluster that should be as easy to use as possible, there are two ways to interact with Bianca: one more visual, the other a command-line environment. Both environments are shown below.

As Bianca has sensitive data, there are constraints on how to access Bianca.

One such constraint in accessing Bianca is that one has to be within the university networks, as described at get within the university networks.

Another such constraint is that data can be transferred to or from a virtual project cluster through the so-called 'wharf', which is a special file area that is visible from the Internet. File transfer is described in more detail here.

Bianca runs the Linux operating system and all users need some basic Linux knowledge to use Bianca.

Using Linux

Using Linux (and especially the so-called command-line/terminal) is essential to use Bianca. Learning the essential Linux commands is described here.

"},{"location":"cluster_guides/biancas_design/#overview-of-all-steps-possibleneeded-to-access-bianca","title":"Overview of all steps possible/needed to access Bianca","text":"
flowchart TD\n\n    subgraph sub_outside[IP outside SUNET]\n      outside(Physically outside SUNET)\n    end\n\n    subgraph sub_inside[IP inside SUNET]\n      physically_inside(Physically inside SUNET)\n      inside_using_vpn(Inside SUNET using VPN)\n      inside_using_rackham(Inside SUNET using Rackham)\n\n      subgraph sub_bianca_shared_env[Bianca shared network]\n        bianca_shared_console[Bianca console environment login]\n        bianca_shared_remote_desktop[Bianca remote desktop login]\n        subgraph sub_bianca_private_env[The project's private virtual project cluster]\n          bianca_private_console[Bianca console environment]\n          bianca_private_remote_desktop[Bianca remote desktop]\n          bianca_private_terminal[Terminal]\n        end\n      end\n    end\n\n    %% Outside SUNET\n    outside-->|Move physically|physically_inside\n    outside-->|Use a VPN|inside_using_vpn\n    outside-->|Login to Rackham|inside_using_rackham\n\n    %% Inside SUNET\n    physically_inside-->|SSH|bianca_shared_console\n    physically_inside-->|UPPMAX website|bianca_shared_remote_desktop\n    physically_inside-.->inside_using_rackham\n    physically_inside-.->inside_using_vpn\n    inside_using_vpn-->|SSH|bianca_shared_console\n    inside_using_vpn-->|UPPMAX website|bianca_shared_remote_desktop\n    inside_using_rackham-->|SSH|bianca_shared_console\n\n    %% Shared Bianca\n    bianca_shared_console --> |UPPMAX password|bianca_private_console\n    bianca_shared_remote_desktop-->|UPPMAX password|bianca_private_remote_desktop\n\n    %% Private Bianca\n    bianca_private_console---|is a|bianca_private_terminal\n    bianca_private_remote_desktop-->|must also use|bianca_private_terminal

This is an overview of all steps possible/needed to access Bianca.

"},{"location":"cluster_guides/biancas_name/","title":"Bianca's name","text":"

Bianca, like all UPPMAX clusters, is named after a Tintin character, in this case after Bianca Castafiore.

What are the UPPMAX clusters?

All UPPMAX clusters can be found here.

"},{"location":"cluster_guides/cluster_guide_faq/","title":"UPPMAX clusters FAQ","text":"
  • How to access data from an expired NAISS project?
  • How to extend the duration of a job that is running?
","tags":["FAQ","UPPMAX","cluster","clusters"]},{"location":"cluster_guides/cluster_speeds/","title":"Cluster speeds","text":"

Sometimes you feel a cluster is slow.

Below are some benchmark results, so you can compare with what you are experiencing.

Please contact support when you find out that your favorite cluster is slower than expected.

What could cause such a slowdown?

When things are slow, it is usually due to latency caused by many processes accessing the same files and physical hard drives.

Examples:

  • 2024-12-06: Castor is still holding some file systems for Bianca, or a user is running a lot of very short-lived Perl jobs on Bianca that put too high a load on Castor.
","tags":["UPPMAX","cluster","clusters","speed","fast","slow"]},{"location":"cluster_guides/cluster_speeds/#starting-an-interactive-session-with-two-cores-for-one-hour","title":"Starting an interactive session with two cores for one hour","text":"

In general:

  • It takes seconds if a free compute node is available
  • It takes minutes to start a new node
Date and time Cluster Command You waited for x seconds Complete time (secs) 2024-12-19 8:00 Bianca interactive -A sens2023036 -n 2 -t 1:00:00 518 (8:38) 548 (9:08)","tags":["UPPMAX","cluster","clusters","speed","fast","slow"]},{"location":"cluster_guides/cluster_speeds/#loading-the-r_packages431-module","title":"Loading the R_packages/4.3.1 module","text":"

For a benchmark to solve a ticket, the following command was run in multiple settings:

time module load R_packages/4.3.1\n

From the three resulting times, the 'Real' time is used.
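As a sketch, the output of time has this shape (the numbers here are illustrative only); the 'Real' row is the one used in the tables below:
real    0m0.758s\nuser    0m0.410s\nsys     0m0.280s\n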

Here are some expected timings:

Project Setting Real loading time Rackham SSH 0m0.758s Bianca SSH 0m8.984s

Here are some unexpected timings:

Project Setting Real loading time sens2023598 SSH 6m1.265s sens2023598 Website 6m20.234s sens2017625 SSH 6m4.584s sens2017625 Website, interactive session 7m41.433s sens2017625 SSH, interactive session 7m13.111s","tags":["UPPMAX","cluster","clusters","speed","fast","slow"]},{"location":"cluster_guides/cluster_speeds/#loading-the-rstudio2023121-402-module","title":"Loading the RStudio/2023.12.1-402 module","text":"Project Setting Real loading time Bianca Website 2m3.184s","tags":["UPPMAX","cluster","clusters","speed","fast","slow"]},{"location":"cluster_guides/compress_fastQ/","title":"How should I compress FastQ-format files?","text":"

Short answer: The best compression using a widely available format is provided by bzip2 and its parallel equivalent pbzip2. The best compression ratio for FastQ is provided by fqz_comp in the fqzcomp/4.6 module. However, this tool is experimental and is not recommended for general, everyday use.

"},{"location":"cluster_guides/compress_fastQ/#long-answer","title":"Long answer","text":"

We conducted an informal examination of two specialty FastQ compression tools by recompressing an existing fastq.gz file. The first tool, fqzcomp (available in the module fqzcomp/4.6), uses a compiled executable (fqz_comp) that works similarly to e.g. gzip, while the second tool (LFQC in the module lfqc/1.1) uses separate ruby-language scripts for compression (lfqc.rb) and decompression (lfqcd.rb). It does not appear that the LFQC scripts accept piping, but the documentation is limited.

Loading the needed modules:

module load bioinfo-tools\nmodule load fqzcomp/4.6\nmodule load lfqc/1.1\n

Both modules have 'module help' available for more info. The help for fqzcomp gives the location of its README, which is very helpful in describing minor changes that might occur to the FastQ file during decompression (these do not affect the read name, sequence or quality data).

One change from the 'standard' implementation of LFQC was to make the scripts stand-alone with #! header lines, rather than requiring e.g. 'ruby lfqc.rb ...' as you see in their documentation.

Since piping is not available with LFQC, it is preferable to avoid creating a large intermediate decompressed FastQ file. So, create a named pipe using mkfifo that is named like a fastq file.

mkfifo UME_081102_P05_WF03.se.fastq\nzcat UME_081102_P05_WF03.se.fastq.gz > UME_081102_P05_WF03.se.fastq &\nlfqc.rb UME_081102_P05_WF03.se.fastq\nrm UME_081102_P05_WF03.se.fastq\n

This took a long time, 310 wall seconds.

Next, fqz_comp from fqzcomp/4.6. Since this works like gzip, just use it in a pipe.

zcat UME_081102_P05_WF03.se.fastq.gz | fqz_comp > UME_081102_P05_WF03.se.fastq.fqz\n

This used a little multithreading (up to about 150% CPU) and was much faster than LFQC, just 2-3 seconds. There are other compression options (we tried -s1 and -s9+) but these did not outperform the default (equivalent to -s3). This is not necessarily a surprise; stronger compression means attempting to make better guesses and sometimes these guesses are not correct. No speedup/slowdown was noticed with other settings but the input file was relatively small.

-rw-rw-r-- 1 28635466 Mar 10 12:53 UME_081102_P05_WF03.se.fastq.fqz1\n-rw-rw-r-- 1 29271063 Mar 10 12:52 UME_081102_P05_WF03.se.fastq.fqz9+\n-rw-rw-r-- 1 46156932 Jun 6   2015 UME_081102_P05_WF03.se.fastq.gz\n-rw-rw-r-- 1 28015892 Mar 10 12:53 UME_081102_P05_WF03.se.fastq.fqz\n-rw-rw-r-- 1 24975360 Mar 10 12:45 UME_081102_P05_WF03.se.fastq.lfqc\n

We also compared against bzip2 and xz, which are general-use compressors. These both function like gzip (and thus like fqz_comp) and both outperform gzip, as expected. xz is becoming a more widely-used general compressor like bzip2, but for this file it required perhaps 20x as much time as bzip2 and did worse.

-rw-rw-r-- 1 35664555 Mar 10 13:10 UME_081102_P05_WF03.se.fastq.bz2\n-rw-rw-r-- 1 36315260 Mar 10 13:10 UME_081102_P05_WF03.se.fastq.xz\n

Neither of these improved general-use compressors did as well with FastQ as the specialty compressors. This makes sense given that the specialty compressors can take advantage of the restrictions of the format.

"},{"location":"cluster_guides/compress_fastQ/#which-is-the-best-method-in-this-trial","title":"Which is the best method in this trial?","text":"

From the results of this trial, the tool that provides the best compression ratio in a reasonable amount of time is fqz_comp in the fqzcomp/4.6 module. It is as fast as bzip2 (which itself compresses much better than gzip) but does a much better job of compressing FastQ. However, fqz_comp is experimental, so we do not recommend it for everyday use. We recommend using bzip2 or its parallel equivalent, pbzip2.

The fqz_comp executable could be used to decompress FastQ within a named pipe if FastQ is required for input:

... <(fqz_comp -d < file.fastq.fqz) ...\n

Note that fqz_comp is designed to compress FastQ files alone, and neither method here provides the blocked compression format suitable for random access that bgzip does; see 'Which compression format should I use for NGS-related files?' for more on that subject.

"},{"location":"cluster_guides/compress_fastQ/#why-not-lfqc","title":"Why not LFQC?","text":"

Though LFQC has the best compression of FastQ, there are some strong disadvantages. First, it takes quite a long time, perhaps 50x longer than fqz_comp. Second, it apparently cannot be used easily within a pipe like many other compressors. Third, it contains multiple scripts with multiple auxiliary programs, rather than a single executable. Fourth, it is quite verbose during operation, which can be helpful but cannot be turned off. Finally, it was difficult to track down for installation; two different links were provided in the publications and neither worked. It was finally found in a GitHub repository, the location of which is provided in the module help.

"},{"location":"cluster_guides/compress_format/","title":"Which compression format should I use for NGS-related files?","text":"

How well things compress will vary a great deal with the input data. An additional consideration is how useful the compressed format will be to you later. Some tools can handle only one compressed format (almost always gzip) and some can handle two (almost always gzip and bzip2). The help information for the tool should be explicit about which formats it understands. You can also use named pipes or the bash <() syntax to uncompress files 'on the fly' if the tool you are using cannot handle that compressed format.

Another consideration for usefulness is the structure of the specific compressed format. By default gzip is not 'blocked'; the compression is applied continually across the entire file, and to uncompress something in the middle it is necessary to uncompress everything up to that point. Tools that understand compressed VCF and GFF files require these to be compressed with bgzip (available as part of the htslib module), which applies blocked gzip compression, so that it is possible to uncompress interior chunks of the files efficiently. This is useful when viewing compressed VCF/GFF files in a viewer such as IGV, for example. For viewing, such files also need an index created, which is accomplished using tabix (also part of the htslib module), which understands bgzip-compressed files. BAM files also use a type of gzip compression that is blocked. Files compressed with bgzip can be uncompressed with gzip.
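As a short sketch of preparing a VCF file for indexed access with the tools named above (file.vcf is a placeholder):
module load bioinfo-tools htslib\nbgzip file.vcf\ntabix -p vcf file.vcf.gz\n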

Bzip2 is inherently blocked. Bzip2 is a more efficient compression method than gzip, but takes perhaps twice as long or longer to compress the same file. Fortunately, another advantage of blocked compression is that multiple parts of the file can be compressed at once. UPPMAX has pbzip2 available as a system tool, which can perform parallel compression and decompression of bzip2-format files using multiple threads. This is quite fast. Do 'pbzip2 -h' for help. An UPPMAX user has provided a helpful SBATCH script.
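For example, a minimal pbzip2 round trip (file.fastq is a placeholder; -p4 asks for 4 threads):
pbzip2 -p4 file.fastq\npbzip2 -d -p4 file.fastq.bz2\n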

Another disadvantage of compression formats that are not blocked is that an error in a file generally corrupts the remainder of the file. Files with blocked compression can recover all blocks that do not contain errors.

Other compressed formats are available, including 7z, available by loading the p7zip module, and xz, available as the system tool xz and by loading the liblzma module. For compressing FastQ files in particular, which have a very strict format, our small-scale comparison of tools showed that xz was slightly inferior to bzip2 and much slower during compression, and specialty tools for FastQ compression were superior in compression ratios to general-purpose compressors. See How Should I Compress FastQ-format Files? for more on FastQ compression.

Most compression tools have options that allow you to trade off between speed of compression and size reduction of compression. The defaults are almost always sufficient.

It makes little sense to compress an already-compressed file with a different format. It is much better to set up a pipe to uncompress the file and then recompress in the new format.
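A sketch of such a pipe, recompressing from gzip to bzip2 without writing an intermediate file (file names are placeholders):
zcat file.txt.gz | pbzip2 -p4 -c > file.txt.bz2\n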

Apart from compression dictated by file usage (see above), it is recommended that files that are being compressed for long-term storage (e.g., raw sequence data) are compressed using pbzip2. If the files are already compressed in long-term storage (e.g. Swestore), it is generally not worthwhile to retrieve the files, decompress and recompress them, and then re-upload them.

"},{"location":"cluster_guides/compress_guide/","title":"Brief compression guide","text":"

To avoid filling up the storage at UPPMAX, we ask all users to do their part and store their files in a good way. The best way to store files is to delete everything you don't need anymore, like temporary and intermediate files. For everything else you need to keep, here are some useful commands to know (section about biological data below).

"},{"location":"cluster_guides/compress_guide/#general-files","title":"General files","text":"

We have several compression programs installed and you are free to choose whichever you want (any is better than none). Examples:

"},{"location":"cluster_guides/compress_guide/#gzip-fast-good-compression","title":"gzip (fast, good compression)","text":"

gzip also has a parallel version (pigz) that will let the program use multiple cores, making it much faster. If you want to run multithreaded you should make a reservation in the queue system, as the login nodes will throttle your programs if they use too many resources.

# compress a file\n$ gzip file.txt            # single threaded\n$ pigz -p 4 file.txt       # using 4 threads\n# decompress a file\n$ gunzip file.txt.gz       # single threaded\n$ unpigz -p 4 file.txt.gz  # using 4 threads (4 is max)\n
"},{"location":"cluster_guides/compress_guide/#bzip2-slow-better-compression","title":"bzip2 (slow, better compression)","text":"

bzip2 also has a parallel version (pbzip2) that will let the program use multiple cores, making it much faster. If you want to run multithreaded you should make a reservation in the queue system, as the login nodes will throttle your programs if they use too many resources.

# compress a file\n$ bzip2 file.txt             # single threaded\n$ pbzip2 -p4 file.txt        # using 4 threads\n# decompress a file\n$ bunzip2 file.txt.bz2       # single threaded\n$ pbunzip2 -p4 file.txt.bz2  # using 4 threads\n
"},{"location":"cluster_guides/compress_guide/#zstd-fast-better-compression","title":"zstd (fast, better compression)","text":"

zstd has built-in support for using multiple threads, but only when compressing data, making compression much faster. If you want to run multithreaded you should make a reservation in the queue system, as the login nodes will throttle your programs if they use too many resources.

# compress a file\n$ zstd --rm file.txt        # single threaded\n$ zstd --rm -T4 file.txt    # using 4 threads\n# decompress a file, only single threaded\n$ unzstd --rm file.txt.zst\n
"},{"location":"cluster_guides/compress_guide/#compressing-lots-of-files","title":"Compressing lots of files","text":"

The commands above work on a single file at a time, and if you have 1000s of files it is quite boring to go through them manually. If you want to combine all the files into a single compressed archive, you can use a program named tar.

# to compress a folder (folder/)\n# and all files/folders inside it,\n# creating an archive file named files.tar.gz\n$ tar -czvf files.tar.gz folder/\n# to decompress the archive later\n$ tar -xzvf files.tar.gz\n

If you don't want to combine them in a single file, and instead compress them one by one, you can use the command find.

# to find all files with a name ending\n# with .fq and compress them\n$ find /path/to/search/in -iname '*.fq' -print -exec gzip \"{}\" \\;\n# example to compress all FastQ files in\n# the current directory and all its\n# subdirectories, using 4 threads\n$ find . \\( -iname '*.fq' -o -iname '*.fastq' \\) -print -exec pigz -p 4 \"{}\" \\;\n# same as above, but starting 4 single\n# threaded instances of gzip in parallel\n$ find . \\( -iname '*.fq' -o -iname '*.fastq' \\) -print0 | xargs -0 -P 4 gzip\n
"},{"location":"cluster_guides/compress_guide/#biological-data","title":"Biological data","text":"

There are some compression algorithms that have become standard practice to use in the realm of biological data. Most programs can read the compressed versions of files as long as they are compressed with the correct program. We leave out the decompression commands, mostly because they are already described in the General files section above, but also because there is little reason to ever decompress biological data.

"},{"location":"cluster_guides/compress_guide/#fastq-files","title":"fastq files","text":"
# compress sample.fq\n$ gzip sample.fq        # single threaded\n$ pigz -p 4 sample.fq   # using 4 threads\n
"},{"location":"cluster_guides/compress_guide/#sam-files","title":"sam files","text":"

Loading the needed modules:

# load samtools\n$ module load bioinfo-tools samtools\n

Then:

# compress sample.sam, but remember to delete\n# sample.sam when finished, since samtools\n# will not do that automatically\n# single threaded\n$ samtools view -b -o sample.bam sample.sam\n# using 4 threads\n$ samtools view -@ 4 -b -o sample.bam sample.sam\n
"},{"location":"cluster_guides/compress_guide/#vcf-gvcf-files","title":"vcf / g.vcf files","text":"
# load htslib\n$ module load bioinfo-tools htslib\n# compress sample.vcf / sample.g.vcf\n$ bgzip sample.vcf         # single threaded\n$ bgzip -@ 4 sample.vcf    # using 4 threads\n# index sample.vcf.gz / sample.g.vcf.gz\n$ tabix sample.vcf.gz\n
"},{"location":"cluster_guides/compress_guide/#programs-that-dont-read-compressed-files","title":"Programs that don't read compressed files","text":"

There are clever ways to get around programs that don't support reading compressed files. Let's say you have a program that only reads plain text files. You can use something called process substitution (also known as anonymous named pipes) to be able to decompress the data on-the-fly while feeding it to the program.

"},{"location":"cluster_guides/compress_guide/#how-you-normally-would-run-the-program","title":"How you normally would run the program","text":"
# run the program with uncompressed file\n$ the_program uncompressed.txt\n# now, let's compress the file first and run\n# the program using process substitution\n# to decompress the file\n$ gzip uncompressed.txt\n# run the program using the compressed file\n# (zcat works like cat, but reads gzipped files)\n$ the_program <(zcat compressed.txt.gz)\n# same as above, but reading a\n# bzip2 compressed file\n$ the_program <(bzcat compressed.txt.bz2)\n# same as above, but reading a\n# zstd compressed file\n$ the_program <(zstdcat compressed.txt.zst)\n

In this example we give the program not the name of a file to read, but instead we use process substitution to run zcat and feed the uncompressed data to the program, as if it was reading a file.

"},{"location":"cluster_guides/crex/","title":"Crex","text":"

Crex is an UPPMAX storage system.

"},{"location":"cluster_guides/cygnus/","title":"Cygnus","text":"

Cygnus is an UPPMAX storage system, based on DDN Lustre.

"},{"location":"cluster_guides/dardel/","title":"Dardel","text":"

Dardel is an HPC cluster in Stockholm, maintained by PDC.

If you are working on Rackham, consider moving to Dardel.

  • Login to Dardel
  • Create and upload SSH key for Dardel
  • Migration to Dardel
"},{"location":"cluster_guides/dardel/#links","title":"Links","text":"
  • PDC page about Dardel
"},{"location":"cluster_guides/dardel_migration/","title":"Dardel migration","text":"

This page describes how to transfer files to Dardel, the HPC cluster at PDC in Stockholm.

Visit the Rackham 2 Dardel Drop-in

Every Tuesday at 11:15 (except for the month of July) there is an online Rackham 2 Dardel Drop-in on Zoom, with meeting ID 64896912764

Please join us if you need assistance logging in to Dardel or migrating your data.

Why do I need this?

The Rackham cluster will be decommissioned at the end of 2024, hence all project directories will be deleted. The plan from NAISS is that all Rackham users can move to the Dardel cluster at PDC, and we encourage you to do so right away.

Researchers at Uppsala University, should they so desire, can choose to keep data at UPPMAX. Projects with UU affiliation that remain on Rackham at the end of this year can be transferred to a new local system.

To facilitate this move, we have created a tool that makes the transfer easier.

More details on Rackham's end of life are here.

"},{"location":"cluster_guides/dardel_migration/#short-version","title":"Short version","text":"

The really short description is:

  1. Become a member of a project with resources at Dardel in SUPR.
  2. Create a passwordless SSH key.
  3. Add the key to the PDC login portal.
  4. Add *.uppmax.uu.se as allowed address for the key.
  5. Load module darsync and run darsync check on the folder you want to transfer.
  6. Create a Slurm script using darsync gen on the folder you want to transfer.
  7. Submit the created Slurm script.

See the rest of this guide for more information about these steps.

"},{"location":"cluster_guides/dardel_migration/#long-version","title":"Long version","text":"Prefer a video?

This procedure is also shown in this YouTube video.

First, we are here to help. Please contact support if you run into problems when trying the guide below.

This migration consists of a couple of steps summarized below. Press the links to get a more detailed explanation of each step. Note that step 1 requires some hours of waiting and step 2 requires an overnight wait.

flowchart TD\n  get_supr_project[1 Access to a SUPR project with Dardel]\n  get_pdc_account[2 Access to a PDC account]\n  create_ssh_key[3 Create temporary SSH keys]\n  add_ssh_key[4 Add the SSH keys to the PDC Login Portal]\n  run_darsync[5 Run Darsync]\n  slurm[6 Submit the script created by Darsync]\n  check_logs[7 Check logs]\n  double_check_transfer[8 double-check the transfer]\n  delete_ssh_keys[9 Delete the temporary SSH keys]\n  delete_rackham_files[10 Delete the files on Rackham]\n\n  get_supr_project --> |needed for| get_pdc_account\n\n  create_ssh_key --> |needed for| add_ssh_key\n  get_pdc_account --> |needed for| add_ssh_key\n  add_ssh_key --> |needed for| run_darsync\n  run_darsync --> |needed for| slurm\n  slurm --> |needed for| check_logs\n  check_logs --> |optional| double_check_transfer\n  double_check_transfer --> delete_ssh_keys\n  check_logs --> |needed for| delete_ssh_keys\n  delete_ssh_keys --> |needed for| delete_rackham_files

Overview of the migration process. Note that step 1 requires some hours of waiting and step 2 requires an overnight wait.

After those steps, the procedure will take around 20 minutes, as shown in the YouTube video of this procedure.

"},{"location":"cluster_guides/dardel_migration/#1-get-access-to-a-supr-project-with-dardel","title":"1. Get access to a SUPR project with Dardel","text":"

The first step is to get access to a SUPR project with Dardel. This is described at PDC's page on getting access to Dardel. You will get an email when you are added to a project; this can take some hours.

How do I know I have access to a Dardel project?

Login to https://supr.naiss.se/. If there is a PDC project, you may have access to a project with Dardel.

An example user that has access to a PDC project

If you have a PDC project but are unsure whether it uses Dardel, click on the project to go to the project overview.

An example PDC project overview

From there, scroll down to 'Resources'. If you see 'Dardel' among the compute resources, you have confirmed you have access to a Dardel project.

Resources from an example PDC project

"},{"location":"cluster_guides/dardel_migration/#2-get-a-pdc-account-via-supr","title":"2. Get a PDC account via SUPR","text":"

Get a PDC account via SUPR. This is described at the PDC page on how to apply for an account via SUPR. You will get a PDC account overnight.

How do I know I have a PDC account?

Login to https://supr.naiss.se/ and click on 'Accounts' in the main menu bar at the left.

If you see 'Dardel' among the resources, and status 'Enabled' in the same row, you have a PDC account!

Example of a user having an account at PDC's Dardel HPC cluster

"},{"location":"cluster_guides/dardel_migration/#3-create-ssh-key-pair","title":"3. Create SSH key pair","text":"

First we will create SSH keys to be able to connect to Dardel. We have made a small tool to create the keys for Darsync for you, so just run these commands on UPPMAX:

Loading the needed module:

module load darsync\n

Then creating the key:

darsync sshkey\n
What does that look like?

The screen output will look similar to this:

[sven@rackham1 ~]$ module load darsync\n[sven@rackham1 ~]$ darsync sshkey\n\n\n  ____ ____  _   _ _  _________   __\n / ___/ ___|| | | | |/ / ____\\ \\ / /\n \\___ \\___ \\| |_| | ' /|  _|  \\ V /\n  ___) |__) |  _  | . \\| |___  | |\n |____/____/|_| |_|_|\\_\\_____| |_|\n\nThe sshkey module of this script will generate a SSH key pair that you can use to login to Dardel.\nIt will create two files, one with the private key and one with the public key.\nThe private key should be kept secret and the public key should be added to your authorized_keys file on Dardel.\n\n\n\n\nCreated SSH key: /home/sven/id_ed25519_pdc and /home/sven/id_ed25519_pdc.pub\n\nContent of the public key:\n\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAZkAoqlvm+YQrw26mCuH/4B/meG8O6aS8BB3kw1FDfl sven@rackham1.uppmax.uu.se\n\n\n\n\nYou will now have to add the public key above to the Dardel Login Portal, https://loginportal.pdc.kth.se\n\nSee the user guide for more info about this,\nhttps://docs.uppmax.uu.se/software/ssh_key_use_dardel/#2-how-to-add-an-ssh-key-to-the-pdc-login-portal\n
"},{"location":"cluster_guides/dardel_migration/#4-add-the-public-key-to-the-pdc-login-portal","title":"4. Add the public key to the PDC Login Portal","text":"

See create and use an SSH key pair for Dardel, step 2, to see how to upload the public SSH key to the PDC Login Portal.

"},{"location":"cluster_guides/dardel_migration/#5-run-the-migration-tool-darsync","title":"5. Run the migration tool Darsync","text":""},{"location":"cluster_guides/dardel_migration/#51-load-the-module","title":"5.1 Load the module","text":"
module load darsync\n
"},{"location":"cluster_guides/dardel_migration/#52-check-for-problems","title":"5.2 Check for problems","text":"

This step is optional, yet may help you avoid possible problems.

Running darsync check will make Darsync prompt for questions:

darsync check\n
What does that look like?

Here is output similar to yours, for a user with username sven who wants to transfer his Documents folder:

[sven@rackham1 ~]$ darsync check\n\n\n   ____ _   _ _____ ____ _  __\n  / ___| | | | ____/ ___| |/ /\n | |   | |_| |  _|| |   | ' /\n | |___|  _  | |__| |___| . \\\n  \\____|_| |_|_____\\____|_|\\_\\\n\nThe check module of this script will recursively go through\nall the files in, and under, the folder you specify to see if there\nare any improvements you can to do save space and speed up the data transfer.\n\nIt will look for file formats that are uncompressed, like .fasta and .vcf files\n(most uncompressed file formats have compressed variants of them that only\ntake up 25% of the space of the uncompressed file).\n\nIf you have many small files, e.g. folders with 100 000 or more files,\nit will slow down the data transfer since there is an overhead cost per file\nyou want to transfer. Large folders like this can be archived/packed into\na single file to speed things up.\n\n\nSpecify which directory you want to copy.\nMake sure to use tab completion (press the tab key to complete directory names)\nto avoid spelling errors.\nEx.\n/proj/naiss2099-22-999/\nor\n/proj/naiss2099-22-999/raw_data_only\n\nSpecify local directory: Documents\n/domus/h1/sven/Documents/MATLAB\n\n\n  ____   ___  _   _ _____\n |  _ \\ / _ \\| \\ | | ____|\n | | | | | | |  \\| |  _| \n | |_| | |_| | |\\  | |___\n |____/ \\___/|_| \\_|_____|\n\nChecking completed. Unless you got any warning messages above you\nshould be good to go.\n\nGenerate a Slurm script file to do the transfer by running this script again,\nbut use the 'gen' option this time. See the help message for details,\nor continue reading the user guide for examples on how to run it.\n\ndarsync gen -h\n\nA file containing file ownership information,\n/domus/h1/sven/Documents/darsync_Documents.ownership.gz,\nhas been created. This file can be used to make sure that the\nfile ownership (user/group) will look the same on Dardel as it does here.\nSee ../cluster_guides/dardel_migration/#52-check-for-problems\nfor more info about this.\n
Can I also give the arguments on the command line?

If you prefer to specify everything from the command-line, do:

darsync check --local-dir [foldername]\n

where [foldername] is the path to a folder, for example darsync check --local-dir ~/my_folder.

There are some more optional arguments, see these by doing:

darsync check --help\n

If there are problems reported, contact support or try to fix them yourself.

What is the file darsync_[dirname].ownership.gz?

This is a file containing file ownership information. It is created in the root of the folder you told Darsync to transfer to Dardel.

When a user transfers all the files in a project to a project at Dardel, all the files at Dardel will be owned by the user who did the transfer. By saving the ownership information of the files at UPPMAX, we can map the file ownership information to the corresponding users at Dardel.

Can I delete the file darsync_[dirname].ownership.gz?

No, keep it until you feel at home at Dardel and have worked in your new project a couple of months. By that time you should have encountered any problems with file permissions that you might have.

If you discover that you get problems because of wrong file ownership (write permissions etc.), this file contains the information needed to recreate the file ownerships as they were before you transferred the files, even if your UPPMAX project has already been deleted.

How to fix WARNING: files with uncompressed file extensions above the threshold detected

It looks for files with file endings matching common uncompressed file formats, like .fq, .sam, .vcf, .txt. If the combined file size of these files is above a threshold it will trigger the warning. Most programs that use these formats can also read the compressed version of them.

Examples of how to compress common formats:

# fastq/fq/fasta/txt\ngzip file.fq\n\n# vcf\nbgzip file.vcf\n\n# sam\nsamtools view -b file.sam > file.bam\n# when the above command is completed successfully:\n# rm file.sam\n

For examples on how to compress other file formats, use an internet search engine to look for

how to compress <insert file format name> file\n
How to fix WARNING: Total number of files, or number of files in a single directory

If a project consists of many small files it will decrease the data transfer speed, as there is an overhead cost to starting and stopping each file transfer. A way around this is to pack all the small files into a single tar archive, so that it only has to start and stop a single time.

Example of how to pack a folder and all files in it into a single tar archive.

# on UPPMAX: pack it\ntar -czvf folder.tar.gz /path/to/folder\n# if the command above finished without error messages and you have\n# a folder.tar.gz file that seems about right in size:\nrm -r /path/to/folder\n
"},{"location":"cluster_guides/dardel_migration/#53-generate-script","title":"5.3 Generate script","text":"

In this third step, the Slurm script is created.

A lot of questions

The script will ask multiple questions. Below it is described how to get the answers :-)

Running darsync gen will make Darsync prompt for questions:

darsync gen\n
What does that look like?

Here is output similar to yours, for a fictional user called Sven Svensson, with the UPPMAX username of sven and the PDC username of svensv:

[sven@rackham1 ~]$ darsync gen\n\n\n   ____ _____ _   _\n  / ___| ____| \\ | |\n | |  _|  _| |  \\| |\n | |_| | |___| |\\  |\n  \\____|_____|_| \\_|\n\nThe gen module of this script will collect the information needed\nand generate a script that can be submitted to Slurm to preform the\ndata transfer.\n\nIt will require you to know\n\n    1) Which directory on UPPMAX you want to transfer (local directory).\n    2) Which UPPMAX project id the Slurm job should be run under.\n        ex. naiss2099-23-999\n    3) Which cluster the Slurm job should be run on.\n        ex. rackham, snowy\n    4) Which username you have at Dardel.\n    5) Where on Dardel it should transfer your data to.\n        ex. /cfs/klemming/projects/snic/naiss2099-23-999/from_uppmax\n    6) Which SSH key should be used when connecting to Dardel.\n        ex. /home/user/id_ed25519_pdc\n    7) Where you want to save the generated Slurm script.\n\n\n\nSpecify which directory you want to copy.\nMake sure to use tab completion (press the tab key to complete directory names)\nto avoid spelling errors.\nEx.\n/proj/naiss2099-22-999/\nor\n/proj/naiss2099-22-999/raw_data_only\n\nSpecify local directory: Documents\n\n\nSpecify which project id should be used to run the data transfer job in Slurm.\nEx.\nnaiss2099-23-999\n\nSpecify project id: naiss2099-23-999\n\n\nSpecify which cluster the Slurm job should be run on.\nChoose between rackham and snowy.\nDefault is rackham\n\nSpecify cluster: rackham\n\n\nSpecify the username that should be used to login at Dardel.\nIt is the username you have created at PDC and it is\nprobably not the same as your UPPMAX username.\n\nSpecify Dardel username: svensv\n\n\nSpecify the directory on Dardel you want to transfer your data to.\nEx.\n/cfs/klemming/projects/snic/naiss2099-23-999\n\nSpecify Dardel path: /cfs/klemming/projects/snic/naiss2099-23-999\n\n\nSpecify which SSH key should be used to login to Dardel.\nCreate one by running `dardel_ssh-keygen` if you have not done so yet.\nIf no path is given it will use the default key created by `dardel_ssh-keygen`,\n~/id_ed25519_pdc\n\nSpecify SSH key:\n\n\nSpecify where the Slurm script file should be saved.\nIf not given it will save it here: ~/darsync_Documents.slurm\n\nSpecify Slurm script path:\n\n\n  ____   ___  _   _ _____\n |  _ \\ / _ \\| \\ | | ____|\n | | | | | | |  \\| |  _| \n | |_| | |_| | |\\  | |___\n |____/ \\___/|_| \\_|_____|\n\n\nCreated Slurm script: /home/sven/darsync_Documents.slurm\n\ncontaining the following command:\n\nrsync -e \"ssh -i /home/sven/id_ed25519_pdc -o StrictHostKeyChecking=no\" -acPuv /domus/h1/sven/Documents/ svensv@dardel.pdc.kth.se:/cfs/klemming/projects/snic/naiss2099-23-999\n\n\nTo test if the generated file works, run\n\nbash /home/sven/darsync_Documents.slurm\n\nIf the transfer starts you know the script is working, and you can terminate\nit by pressing ctrl+c and submit the script as a Slurm job.\n\nRun this command to submit it as a job:\n\nsbatch /home/sven/darsync_Documents.slurm\n

After answering all the questions a new file will be created. By default it will be created in your home directory, named darsync_foldername.slurm, where foldername is the name of the folder you told it to transfer, e.g. ~/darsync_naiss2024-23-9999.slurm

In case of a typo, you can also modify the transfer script created by Darsync, which is a regular Slurm script.
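For orientation, the generated script is essentially a Slurm header followed by the rsync command shown in the output above; a rough sketch (the account, time limit, paths and usernames here are placeholders, so check your own generated file rather than copying this):
#!/bin/bash -l\n#SBATCH -A naiss2099-23-999\n#SBATCH -p core -n 1\n#SBATCH -t 10-00:00:00\nrsync -e \"ssh -i /home/sven/id_ed25519_pdc -o StrictHostKeyChecking=no\" -acPuv /domus/h1/sven/Documents/ svensv@dardel.pdc.kth.se:/cfs/klemming/projects/snic/naiss2099-23-999\n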

Can I also give the arguments on the command line?

If you prefer to specify everything from the command-line, do:

darsync gen \\\n  --local-dir [foldername on UPPMAX] \\\n  --remote-dir [foldername on Dardel] \\\n  --slurm-account [slurm_account] \\\n  --cluster [slurm_cluster] \\\n  --username [pdc_username] \\\n  --ssh-key [private_ssh_key_path] \\\n  --outfile [output_filename]\n

where

  • [foldername] is the name to a folder, e.g. ~/my_folder
  • [slurm_account] is the UPPMAX project ID, e.g. uppmax2023-2-25
  • [slurm_cluster] is the cluster on UPPMAX where the job will run, e.g. rackham or snowy
  • [pdc_username] is your PDC username, e.g. svenan
  • [private_ssh_key_path] is the path to the private SSH key, e.g. ~/id_ed25519_pdc
  • [output_filename] is the filename of the generated Slurm script, e.g. ~/dardel_naiss2024-23-9999.sh

resulting in:

darsync gen \\\n  --local-dir ~/my_folder \\\n  --remote-dir /cfs/klemming/projects/naiss2024-23-9999 \\\n  --slurm-account uppmax2023-2-25 \\\n  --cluster rackham \\\n  --username svenan \\\n  --ssh-key ~/id_ed25519_pdc \\\n  --outfile ~/dardel_naiss2024-23-9999.sh\n

There are some more optional arguments, see these by doing:

darsync gen --help\n
How to find out my UPPMAX project ID?

The UPPMAX project ID is used in your Slurm scripts, with the -A flag.

Your UPPMAX project IDs can be found at https://supr.naiss.se/. UPPMAX projects for Rackham usually start with NAISS or UPPMAX and have '(UPPMAX)' after the project name.

Here is how to convert the UPPMAX project name to UPPMAX project ID:

UPPMAX project name   UPPMAX project ID
NAISS 2024/22-49      naiss2024-22-49
UPPMAX 2023/2-25      uppmax2023-2-25
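
The conversion rule is: lowercase the name, remove the space and replace the slash with a dash. As an illustration only (this is not an official tool), the rule can even be applied in the shell:

echo 'NAISS 2024/22-49' | tr '[:upper:]' '[:lower:]' | tr -d ' ' | tr '/' '-'    # prints naiss2024-22-49\n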

An example https://supr.naiss.se/ page. Eligible candidates here are 'NAISS 2024/22-49' and 'UPPMAX 2023/2-25'.

How to find out my PDC username?

Log in to https://supr.naiss.se/ and click on 'Accounts' in the main menu bar at the left.

If you see 'Dardel' among the resources, with status 'Enabled' in the same row, you have a PDC account. In the first column of such a row, you will see your username.

An example of a user having an account at PDC's Dardel HPC cluster. In this case, the username is svenbi

How to find out where on Dardel I will transfer my data to?
  • Your home folder: /cfs/klemming/home/[first letter of username]/[username], where [first letter of username] is the first letter of your PDC username, and [username] is your PDC username, for example /cfs/klemming/home/s/sven
  • Your project folder: /cfs/klemming/projects/[project_storage], where [project_storage] is your PDC project storage folder, for example /cfs/klemming/projects/snic/naiss2023-22-1027

Composite image of a PDC project and its associated storage folder at the bottom. In this case, the full folder name is /cfs/klemming/projects/snic/naiss2023-22-1027
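
To illustrate the home-folder pattern above, here is a minimal bash sketch, where svensv is a hypothetical PDC username:

pdc_user=svensv                                       # hypothetical PDC username\necho \"/cfs/klemming/home/${pdc_user:0:1}/${pdc_user}\" # prints /cfs/klemming/home/s/svensv\n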

"},{"location":"cluster_guides/dardel_migration/#6-runsubmit-the-script-created-by-darsync","title":"6. Run/submit the script created by Darsync","text":"

You can then start the transfer script the same way you run any script with bash:

bash dardel_naiss2024-23-9999.sh\n

Replace naiss2024-23-9999 with the name of the folder you told Darsync to transfer.

Your terminal needs to stay open during the whole process. If you do need to log out, use sbatch as shown below.

Shouldn't I use sbatch?

No.

Indeed, we usually recommend using sbatch (and did so until September 17th).

However, in this case, the login node has a higher file transfer bandwidth than the compute nodes. Hence, the advice for now is to run the script on the login node.

Wouldn't I get complaints?

No.

Normally, when you run CPU-intensive tasks on a login node, we will either contact you or make your program use less CPU power.

In this case, however, the login node is the superior node for file transfer, and we at UPPMAX have agreed to allow our users to run the transfer from it.

Will this run when I close the terminal?

No.

When you run the transfer script with bash, it runs in your terminal session: if you close the terminal, the transfer stops.

If you need to log out or close the terminal, submit the script as a Slurm job with sbatch instead, as shown below.

My transfer job stopped. Is progress lost? Can I restart it?

No progress is lost. Yes, you can restart it: rsync will continue transferring files that have not been transferred or have not been transferred completely.

If you want to start the job by submitting it to the job queue, use the following command:

sbatch ~/dardel_naiss2024-23-9999.sh\n

Replace naiss2024-23-9999 with the name of the folder you told Darsync to transfer.

What does that look like?

Similar to this:

[sven@rackham1 ~]$ sbatch /home/sven/darsync_Documents.slurm\nSubmitted batch job 49021945 on cluster rackham\n
I get an error 'sbatch: error: Batch job submission failed'. What do I do?

It means that the script created for you has a mistake.

See Slurm troubleshooting for guidance on how to troubleshoot this.

How do I know this job has finished?

One way is to see if your job queue is empty:

[sven@rackham1 ~]$ squeue -u $USER\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n

Here, an empty job queue is shown. If the job is still running, you can find it in this list.
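
Another way is to query Slurm's accounting database with sacct; a minimal sketch, assuming the job ID 49021945 from the sbatch example above:

sacct -j 49021945 --format=JobID,JobName,State,Elapsed\n

A State of COMPLETED means the job has finished without Slurm-level errors.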

"},{"location":"cluster_guides/dardel_migration/#7-check-logs","title":"7. Check logs","text":"

Once the submitted job has finished, have a look at the log files produced by the job and make sure it did not end in an error message. Replace naiss2024-23-9999 with the name of the folder you told Darsync to transfer.

tail ~/dardel_naiss2024-23-9999.out\ntail ~/dardel_naiss2024-23-9999.err\n
What does that look like?

If the job finished successfully, the output will look similar to this:

[sven@rackham1 ~]$ tail darsync_Documents.out\nsending incremental file list\n[sven@rackham1 ~]$ tail darsync_Documents.err\n[sven@rackham1 ~]$\n
I have the warning rsync: [generator] failed to set times on \"...\": Operation not permitted (1). Did something go wrong?

No.

Here is the full warning:

rsync: [generator] failed to set times on \"/cfs/klemming/projects/snic/my_project/.\": Operation not permitted (1)\n

This is a warning, indicating that the target folder on Dardel already exists. You can safely ignore it.

I have the warning rsync error: some files/attrs were not transferred. Did something go wrong?

No.

Here is the full warning:

rsync error: some files/attrs were not transferred (see previous errors) (code 23) at main.c(1179) [sender=3.1.2]\n

This is a warning, indicating that some file attributes were not transferred. A simple example is the file attribute for who created the file: this will differ between UPPMAX and PDC (the organisation that runs Dardel) because you have different usernames, for example svesv ('Sven Svensson') on UPPMAX and svensv on PDC. Hence, the file creator will differ between files.

"},{"location":"cluster_guides/dardel_migration/#8-optional-confirm-all-files-are-transferred","title":"8. (optional) confirm all files are transferred","text":"

If your Slurm log looks like the output below, all file transfers are finished.

[sven@rackham3 ~]$ bash darsync_cedi.slurm\nsending incremental file list\nrsync: [generator] failed to set times on \"/cfs/klemming/projects/snic/cedi/.\": Operation not permitted (1)\n

The tool that darsync uses (called rsync) inherently cares about file integrity: you can reasonably assume your files have been transferred. See the box below for details.

How can I reasonably assume my files are transferred?

rsync only stops transferring data once all MD5 checksums between Rackham and Dardel match. An MD5 checksum is a way to summarise a file's content in one big number: if any bit in a file differs, the file gets a different MD5 checksum. Hence, if the MD5 checksums match, you can reasonably assume the files are identical.
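
If you want to convince yourself by hand, one way (a sketch only, not something Darsync requires; folder names taken from the examples above) is to compute one digest over all per-file checksums on each side and compare the two results:

# on Rackham\ncd ~/Documents && find . -type f -exec md5sum {} + | sort -k 2 | md5sum\n# on Dardel, in the transferred folder\ncd /cfs/klemming/projects/snic/naiss2099-23-999 && find . -type f -exec md5sum {} + | sort -k 2 | md5sum\n

If the two final digests are equal, the contents of every file match.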

One way to double-check is to see if the total file sizes on Rackham and Dardel match.
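
For example, you could compare the output of du on both sides; a sketch, using the example project paths from earlier on this page:

du -sh /proj/naiss2099-23-999                         # on Rackham\ndu -sh /cfs/klemming/projects/snic/naiss2099-23-999   # on Dardel\n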

In https://supr.naiss.se, you can see the disk usage of your projects

What does that look like?

This looks like this, for an UPPMAX project:

A PDC project will look similar.

You can also use a command line tool, uquota, to see your project's disk usage on Rackham.

What does that look like?

This looks like this, for an UPPMAX project:

[sven@rackham1 ~]$ uquota\nYour project       Your File Area           Unit        Usage  Quota Limit  Over Quota\n-----------------  -----------------------  -------  --------  -----------  ----------\nhome               /home/sven               GiB          17.6           32            \nhome               /home/sven               files      112808       300000            \nnaiss2024-22-1202  /proj/r-py-jl-m-rackham  GiB           6.1          128            \nnaiss2024-22-1202  /proj/r-py-jl-m-rackham  files       52030       100000            \nnaiss2024-22-1442  /proj/hpc-python-fall    GiB           0.0          128            \nnaiss2024-22-1442  /proj/hpc-python-fall    files           4       100000            \nnaiss2024-22-49    /proj/introtouppmax      GiB           5.1          128            \nnaiss2024-22-49    /proj/introtouppmax      files       20290       100000            \n

For PDC, read their documentation here: you will need to search for 'Klemming data management'.

"},{"location":"cluster_guides/dardel_migration/#9-delete-the-ssh-key","title":"9. Delete the SSH key","text":"

After the migration, these temporary SSH keys can and should be deleted:

rm ~/id_ed25519_pdc*\n
What does this look like?

Your screen will show something similar to this:

[sven@rackham1 ~]$ rm ~/id_ed25519_pdc*\n[sven@rackham1 ~]$\n
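
To verify that the keys are indeed gone, you can try listing them; ls reporting 'No such file or directory' confirms the deletion:

ls -l ~/id_ed25519_pdc*\n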
"},{"location":"cluster_guides/dardel_migration/#10-delete-the-files-on-rackham","title":"10. Delete the files on Rackham","text":"

Now that the files are transferred to Dardel, you can delete the files on Rackham that you've just transferred.

What does that look like?

If you transferred one folder, for example Documents, here is how to delete it and what that looks like:

[sven@rackham1 ~]$ rm -rf Documents/\n[sven@rackham1 ~]$\n

The rm command (rm is short for 'remove') cannot be undone. Luckily, your files are already on Dardel :-)

"},{"location":"cluster_guides/dardel_migration/#questions","title":"Questions","text":"How long does the transfer take?

Estimates range from 23 to 360 gigabytes per hour; this excludes the observed extremes of 7 and 3600 gigabytes per hour. As a worked example: at those typical rates, transferring 1 TiB (1024 GiB) takes roughly 3 to 45 hours.

However, for large numbers of small files, a files-per-second metric would be more appropriate, but that requires a benchmark.

"},{"location":"cluster_guides/dardel_migration/#t-troubleshooting","title":"T. Troubleshooting","text":""},{"location":"cluster_guides/dardel_migration/#t1-ssh-connect-to-host-dardelpdckthse-port-22-no-route-to-host","title":"T1. ssh: connect to host dardel.pdc.kth.se port 22: No route to host","text":""},{"location":"cluster_guides/dardel_migration/#t1-full-error-message","title":"T1. Full error message","text":"
[sven@rackham1 ~]$ bash /domus/h1/sven/dardel_transfer_script.sh\nssh: connect to host dardel.pdc.kth.se port 22: No route to host\nrsync: connection unexpectedly closed (0 bytes received so far) [sender]\nrsync error: unexplained error (code 255) at io.c(226) [sender=3.1.2]\n
"},{"location":"cluster_guides/dardel_migration/#t1-likely-cause","title":"T1. Likely cause","text":"

This probably means that Dardel is down, likely due to maintenance.

"},{"location":"cluster_guides/dardel_migration/#t1-solution","title":"T1. Solution","text":"

You can do nothing, except wait until Dardel is up again.

You may check the PDC news at https://www.pdc.kth.se/about/pdc-news to confirm that there is indeed a problem with Dardel.

"},{"location":"cluster_guides/dardel_migration/#t2-rsync-generator-failed-to-set-times-on-cfsklemmingprojectssnicnaiss2024-23-352-operation-not-permitted-1","title":"T2. rsync: [generator] failed to set times on \"/cfs/klemming/projects/snic/naiss2024-23-352/.\": Operation not permitted (1)","text":""},{"location":"cluster_guides/dardel_migration/#t2-full-error-message","title":"T2. Full error message","text":"
$ bash darsync_my_folder.slurm\nsending incremental file list\nrsync: [generator] failed to set times on \"/cfs/klemming/projects/snic/naiss2024-23-352/.\": Operation not permitted (1)\n

after which the script keeps running.

For UPPMAX staff

An example can be found at https://github.com/UPPMAX/ticket_296149.

"},{"location":"cluster_guides/dardel_migration/#t2-hypothesized-cause","title":"T2. Hypothesized cause","text":"

This darsync script has been run for a second (or further) time; hence, it has already created the target folders on Dardel. This hypothesis is backed by this Stack Overflow post, where it is suggested to delete the folders; in this case: the target folders on Dardel.

"},{"location":"cluster_guides/dardel_migration/#t2-solution","title":"T2. Solution","text":"

On Dardel, delete the target folders that are already there and re-run the script.

"},{"location":"cluster_guides/dardel_migration/#t3-permission-denied-publickeygssapi-keyexgssapi-with-mic","title":"T3. Permission denied (publickey,gssapi-keyex,gssapi-with-mic)","text":""},{"location":"cluster_guides/dardel_migration/#t3-full-error-message","title":"T3. Full error message","text":"
[sven@rackham1 .ssh]$ bash /home/sven/darsync_my_script.slurm\nPermission denied (publickey,gssapi-keyex,gssapi-with-mic).\n\nrsync: connection unexpectedly closed (0 bytes received so far) [sender]\n\nrsync error: unexplained error (code 255) at io.c(226) [sender=3.1.2]\n

Note that our fictional user runs the Slurm script via bash, instead of submitting it via sbatch.

"},{"location":"cluster_guides/dardel_migration/#t3-first-possible-fix","title":"T3. First possible fix","text":"

Submit the script with sbatch instead:

sbatch /home/sven/darsync_my_script.slurm\n
"},{"location":"cluster_guides/dardel_migration/#t3-second-possible-fix","title":"T3. Second possible fix","text":"

Another possible fix comes from StackOverflow:

Setting 700 to .ssh and 600 to authorized_keys solved the issue.

chmod 700 /root/.ssh\nchmod 600 /root/.ssh/authorized_keys\n

Hence, try:

chmod 700 ~/.ssh\nchmod 600 ~/.ssh/authorized_keys\n
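
To verify the resulting permissions, a quick check (expect drwx------ for the folder and -rw------- for the file):

ls -ld ~/.ssh ~/.ssh/authorized_keys\n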

Still does not work? Contact support

"},{"location":"cluster_guides/disk_quota_more/","title":"How can I display my disk quota?","text":"

To limit the amount of disk space each user can allocate, we use a disk quota system at UPPMAX. The default disk quota is 32 GByte in your home directory. Every SNIC project also comes with a default 128 GByte of backed-up project storage. If more data is needed, you may apply for an UPPMAX Storage Project and get more quota. UPPNEX projects have a default 512 GByte of backed-up project storage and 512 GByte of nobackup space.

You can display your current usage with the command uquota.

When you exceed your quota, the system will not let you write any more data, and you have to either remove some files or request more quota. If you have been granted a larger quota, the uquota command will also show the date when, and the limit to which, your quota will change.

Before contacting support, clean out unnecessary data and make an inventory of the data in your project (what type of data, how big, why it's needed). Here are two commands:

du -b $PWD | sort -rn | awk 'NR==1 {ALL=$1} {print int($1*100/ALL) \"% \" $0}'\n

This first command results in a list of subdirectories ordered by size and proportion of total size.

find $PWD -type f -print0 | xargs -0 stat -c \"%s %n\" | sort -rn\n

This second command produces a list of the files in the current directory that take up the most space. These commands may take a long time to complete; use CTRL + C to cancel execution if you change your mind.

After these two checks, to get more disk space, contact support and state how much you need, for how long, and why. See the storage project application page for more information on how we handle and prioritise storage requests.

You should also read the Disk Storage Guide.

"},{"location":"cluster_guides/disk_storage_guide/","title":"Disk storage guide","text":""},{"location":"cluster_guides/disk_storage_guide/#quota","title":"Quota","text":"

Users have access to shared network storage on various cluster file systems. This means that whether you are logged in to a login server or running on a compute node, you will have the same view of the storage.

There are several different classes of disk storage available with different policies for usage, limits and backup:

  • The user home file system
  • Local scratch file systems
  • The network project and nobackup file system
  • Temporary virtual filesystem

Home directories and some project storage are backed up to tape.

"},{"location":"cluster_guides/disk_storage_guide/#how-much-of-my-quota-do-i-use","title":"How much of my quota do I use?","text":"

Use uquota to check current disk usage and limits.

What does that look like?

Your output will look similar to this:

[sven@rackham3 ~]$ uquota\nYour project       Your File Area           Unit        Usage  Quota Limit  Over Quota\n-----------------  -----------------------  -------  --------  -----------  ----------\nhome               /home/sven             GiB          16.6           32            \nhome               /home/sven             files      104165       300000            \nnaiss2024-22-1202  /proj/r-py-jl-m-rackham  GiB           0.0          128            \nnaiss2024-22-1202  /proj/r-py-jl-m-rackham  files           4       100000            \nnaiss2024-22-49    /proj/introtouppmax      GiB           5.1          128            \nnaiss2024-22-49    /proj/introtouppmax      files       20290       100000            \nstaff              /proj/staff              GiB       66064.8       102400            \nstaff              /proj/staff              files    21325500     15000000           *\n
"},{"location":"cluster_guides/disk_storage_guide/#i-use-more-quota-than-i-think-how-do-i-find-out-the-cause","title":"I use more quota than I think. How do I find out the cause?","text":"

To find the 20 folders that take up the most storage space, run:

du -b $PWD | sort -rn | awk 'NR==1 {ALL=$1} {print int($1*100/ALL) \"% \" $0}' | head -n 20\n
What does that look like?

Your output looks similar to this:

[sven@rackham3 ~]$ du -b $PWD | sort -rn | awk 'NR==1 {ALL=$1} {print int($1*100/ALL) \"% \" $0}' | head -n 20\n100% 17643266006 /home/sven\n50% 8984271436 /home/sven/.cache\n45% 8016778981 /home/sven/.cache/pip\n39% 6988369390 /home/sven/.cache/pip/http\n28% 4986824453 /home/sven/.local\n28% 4986117855 /home/sven/.local/lib\n27% 4816372185 /home/sven/.local/lib/python3.8\n27% 4816368089 /home/sven/.local/lib/python3.8/site-packages\n15% 2797022871 /home/sven/.local/lib/python3.8/site-packages/nvidia\n10% 1876238645 /home/sven/.cache/pip/http/3\n9% 1648194862 /home/sven/.local/lib/python3.8/site-packages/torch\n9% 1589833684 /home/sven/users\n8% 1569946463 /home/sven/users/fares\n8% 1553069908 /home/sven/.local/lib/python3.8/site-packages/torch/lib\n8% 1431151816 /home/sven/.cache/pip/http/0\n7% 1411093224 /home/sven/.cache/pip/http/3/c\n5% 1023338615 /home/sven/.local/lib/python3.8/site-packages/nvidia/cudnn\n5% 1022966263 /home/sven/.local/lib/python3.8/site-packages/nvidia/cudnn/lib\n5% 983932032 /home/sven/.cache/pip/http-v2\n5% 983390581 /home/sven/.cache/pip/http/9\n

To find the 20 files that take up the most storage space, run:

find $PWD -type f -print0 | xargs -0 stat -c \"%s %n\" | sort -rn | head -n 20\n
What does that look like?

Your output looks similar to this:

[sven@rackham3 ~]$ find $PWD -print0 -type f | xargs -0 stat -c \"%s %n\" | sort -rn | head -n 20\n1546936200 /home/sven/users/anna/H10_Avian_1650_2000_HA_alignment.trees\n902414441 /home/sven/.local/lib/python3.8/site-packages/torch/lib/libtorch_cuda.so\n797076603 /home/sven/.cache/pip/http/0/c/d/a/3/0cda36001dc173401b525a7e434e8b7f1079d34f31141b688325244b\n755535721 /home/sven/.cache/pip/http/9/b/8/7/5/9b875d1148ce95ad551df724a540378d1dc8158fa59145beb2ec4125\n731727087 /home/sven/.cache/pip/http/3/c/e/f/9/3cef90e2f33f3b9a1b50e02cc0736e09cc97714cb8b1101d706d912d\n664753951 /home/sven/.cache/pip/http/3/c/8/2/7/3c827aae7500e30cec6930647f8971adb3eafb1cd65a44fcf02ba940\n589831274 /home/sven/.cache/pip/http-v2/9/4/c/e/7/94ce755eb45386ac0cd2115e71a8162388f908eac28abff6118b7e7a.body\n569645536 /home/sven/.local/lib/python3.8/site-packages/nvidia/cudnn/lib/libcudnn_engines_precompiled.so.9\n515090264 /home/sven/.local/lib/python3.8/site-packages/nvidia/cublas/lib/libcublasLt.so.12\n497648053 /home/sven/.cache/pip/http/0/e/3/7/9/0e379b2d265d90194ab62c0f7704318e349017777b755c72c955e025\n497624428 /home/sven/.cache/pip/http/4/b/7/9/b/4b79bbc6cc88163d2cba55b1492741f457013fc2c14b26bdd398a0a3\n495148366 /home/sven/.cache/pip/http/e/7/c/6/1/e7c618a0177b1a48a4599a6785fda5ffd4946442a77e875b970fdfee\n492151297 /home/sven/.local/lib/python3.8/site-packages/torch/lib/libtorch_cpu.so\n468354983 /home/sven/.cache/huggingface/hub/models--zhihan1996--DNABERT-2-117M/blobs/7ff39ec77a484dd01070a41bfd6e95cdd7247bec80fe357ab43a4be33687aeba\n410595986 /home/sven/.cache/pip/http/3/9/a/e/6/39ae6aa825aebb75b0193714975cbc9defffa90203c5342f2214137e\n264876688 /home/sven/.local/lib/python3.8/site-packages/nvidia/cusparse/lib/libcusparse.so.12\n240706416 /home/sven/.local/lib/python3.8/site-packages/nvidia/cudnn/lib/libcudnn_adv.so.9\n232685936 /home/sven/.local/lib/python3.8/site-packages/nvidia/nccl/lib/libnccl.so.2\n209353474 /home/sven/.cache/pip/http/d/4/c/3/e/d4c3ec899ac2836a7f89ffad88e243bf35b92f56ff0b61ad0f5badf5\n195959494 /home/sven/.cache/pip/http/6/e/f/7/a/6ef7ae373253a3997ffc8ac7b70e67716f79d6365ffa5c28f40f349a\n
"},{"location":"cluster_guides/disk_storage_guide/#if-you-need-more-quota","title":"If you need more quota","text":"

If more quota is needed, contact support for advice. We do not extend quotas for home directories or SNIC project directories, but it's possible to apply for storage projects.

Before contacting support, clean out unnecessary data and make an inventory of the data in your project (what type of data, how big, why it's needed). Here are two commands:

du -b $PWD | sort -rn | awk 'NR==1 {ALL=$1} {print int($1*100/ALL) \"% \" $0}'\n

This first command results in a list of subdirectories ordered by size and proportion of total size.

find $PWD -type f -print0 | xargs -0 stat -c \"%s %n\" | sort -rn\n

This second command produces a list of the files in the current directory that take up the most space. These commands may take a long time to complete; use CTRL + C to cancel execution if you change your mind.

"},{"location":"cluster_guides/disk_storage_guide/#if-you-need-even-more-quota-for-archiving","title":"If you need even more quota for archiving","text":"

Please contact support.

We have previously been able to provide users with a low-cost, moderately performant storage solution at a cost of 500 SEK/TB/year, for a commitment of four years and at least 50 TB.

"},{"location":"cluster_guides/disk_storage_guide/#environmental-variables","title":"Environmental variables","text":"

We have defined several environment variables to help our users. They are:

  • $HOME (or $SNIC_BACKUP) is a traditional one, pointing to the user's home directory.
  • $TMPDIR (or $SNIC_TMP) points to node-local storage, suitable for temporary files that can be deleted when the job finishes
  • $SNIC_NOBACKUP points to an UPPMAX-wide storage suitable for temporary files (not deleted when the job is finished)
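
A minimal sketch that prints where each of these areas points, for example inside a job script:

echo \"home:               $HOME\"\necho \"node-local scratch: $SNIC_TMP\"\necho \"UPPMAX-wide temp:   $SNIC_NOBACKUP\"\n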
"},{"location":"cluster_guides/disk_storage_guide/#types-of-storage","title":"Types of storage","text":""},{"location":"cluster_guides/disk_storage_guide/#user-home-directories","title":"User Home directories","text":"

Paths: $HOME or $SNIC_BACKUP

Permanent storage of users' files during the lifetime of their accounts. Shared access on all cluster nodes. Snapshots are normally enabled on this file system, and you can access the snapshots in every directory by 'ls .snapshot' or 'cd .snapshot'. The quota is 32 GB per user. We provide backup of this volume, and we keep the files on tape for up to 90 days after they are deleted from disk. If you have files you do not want backed up, place them in a folder called 'nobackup'.

We recommend you do not use your home directory for running jobs. Our general recommendation is to keep everything related to a specific research project in its project directory.

"},{"location":"cluster_guides/disk_storage_guide/#local-scratch","title":"Local Scratch","text":"

Paths: $TMPDIR or $SNIC_TMP

Each node has a /scratch volume for local access providing the most efficient disk storage for temporary files. Users have read/write access to this file system. Slurm defines the environment variable TMPDIR which you may use in job scripts. On clusters with Slurm you may use /scratch/$SLURM_JOB_ID. This area is for local access only, and is not directly reachable from other nodes or from the front node. There is no backup of the data and the lifetime of any file created is limited to the current user session or batch job. Files are automatically erased when space is needed by other users.

"},{"location":"cluster_guides/disk_storage_guide/#projects-global-network-storage","title":"Projects global (network) storage","text":"

Paths: /proj/[proj-id]

The project global storage is permanent storage of project's files during the lifetime of the project. Disk quota on this volume is shared by all project members. Default quota allocation is determined by your project type.

Note that the quota system on crex is built on group ownership of files/directories. This means that moving files between project directories does not directly affect quota. We have scripts and other tricks that try to ensure the correct group is always used, but in general this may lag quite some time - it takes a while to go through everything, especially since we don't want to affect performance. To make sure quota information is correct, you can change the group to the correct one after moving directories:

chgrp -R PROJECT_YOU_MOVED_TO PATH_OF_THE_MOVED_DIRECTORY

If you don't do this, it will still be fixed eventually, but it may take a while.
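
As a concrete instance of the pattern above (a sketch; the project ID and folder are hypothetical, and we assume the project's group has the same name as the project ID):

chgrp -R naiss2024-22-49 /proj/naiss2024-22-49/moved_data\n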

The files are backed up to tape and we keep the files for 30 days after they are deleted from disk. In the project folder you should keep all your raw data and important scripts.

On Bianca and in SLLStore and UppStore projects, all temporary files, and files that can be regenerated (e.g. data created from your computations), should be moved to the nobackup folder.

More information about backup at UPPMAX.

"},{"location":"cluster_guides/disk_storage_guide/#temporary-virtual-filesystem","title":"Temporary virtual filesystem","text":"

Paths: /dev/shm/[job-id]

On all our clusters we have a temporary virtual filesystem implemented as a shared memory area, i.e. it primarily uses RAM for storage (until it eventually might have to swap out to physical disk), and can be accessed via the path /dev/shm/[job-id].

In some situations this \"disk\" area can be quicker to read/write to, but depending on the circumstances it can also be slower than local scratch disk. Also note that it is a shared resource among all running jobs on a specific node, so depending on the node and how much memory your job has been allocated, the amount of data you can write will vary.
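
A minimal sketch of using this area inside a Slurm job (small_index.db is a hypothetical file; mkdir -p is harmless if the folder already exists):

mkdir -p /dev/shm/$SLURM_JOB_ID                # RAM-backed work area for this job\ncp small_index.db /dev/shm/$SLURM_JOB_ID/      # stage a small, frequently read file\n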

"},{"location":"cluster_guides/extend_duration_of_running_job/","title":"How to extend the duration of a job that is running?","text":"

There are many UPPMAX cluster frequently asked questions. This page describes how to extend a job that has been running for a long time and is near its duration limit.

","tags":["FAQ","job","longer","extend","duration","time"]},{"location":"cluster_guides/extend_duration_of_running_job/#casus","title":"Casus","text":"

You need to do a heavy calculation. It is expected to take a long time, hence you set the time limit to the maximum of 10 days.

On the 8th day, you see that the job is only 75% complete, hence you need a few extra days.

How to extend the duration of your job?

","tags":["FAQ","job","longer","extend","duration","time"]},{"location":"cluster_guides/extend_duration_of_running_job/#solution","title":"Solution","text":"

Contact support

","tags":["FAQ","job","longer","extend","duration","time"]},{"location":"cluster_guides/file_transfer/","title":"File transfer","text":"

File transfer is the process of getting files from one place to the other.

  • File transfer to/from Bianca
  • File transfer to/from Dardel
  • File transfer to/from Rackham
  • File transfer to/from Transit
"},{"location":"cluster_guides/files/","title":"Files on UPPMAX","text":""},{"location":"cluster_guides/files/#disk-storage-guide","title":"Disk storage guide","text":"

See the UPPMAX disk storage guide.

"},{"location":"cluster_guides/files/#where-are-my-files-or-what-are-the-different-file-systems","title":"Where are my files? (Or, what are the different file systems?)","text":"

You have access to the same home directory regardless of what cluster you have logged into. Here you store your private files.

All projects also have a central storage area under the /proj/[project id]/ directory path. When you first log in to UPPMAX, you will see your home directory, so you will have to change to the project directory if you want to transfer project data files.

Also note that UPPMAX uses different disk quotas on your home directory and other areas you have access to (like the project folder). Use uquota to see how much disk space you use.

"},{"location":"cluster_guides/files/#your-private-files","title":"Your private files","text":"

When you log in to UPPMAX for the first time you only have the following files created by the system:

$ ls -la\ntotal 68\ndrwxr-x---  7 user     uppmax 4096 Jun  2 23:11 .\ndrwxr-xr-x 19 root     root      0 Jun  9 13:16 ..\n-rw-r--r--  1 user     uppmax   24 Jan  9  2008 .bash_logout\n-rw-r--r--  1 user     uppmax  435 Apr 21  2008 .bash_profile\n-rw-r--r--  1 user     uppmax  446 Jan  9  2008 .bashrc\ndrwxr-xr-x  2 user     uppmax 4096 Jan  9  2008 bin\n-rw-r--r--  1 user     uppmax  385 Jan  9  2008 .cshrc\n-rw-r--r--  1 user     uppmax  237 Jan  9  2008 .emacs\ndrwxrwxrwx  1 user     uppmax   14 Jun  2 11:05 glob\n-rw-r--r--  1 user     uppmax  120 Jan  9  2008 .gtkrc\n-rw-r--r--  1 user     uppmax  279 Apr 21  2008 .login\ndrwx--S---  2 user     uppmax 4096 May  2  2008 private\n-rw-r--r--  1 user     uppmax  307 Apr 21  2008 .profile\n-rw-r--r--  1 user     uppmax  220 Jan  9  2008 .zshrc\n

The files starting with a . (i.e. a dot or period) are hidden files. These are commonly startup scripts or configuration files.

The default permission of your home directory is 750, i.e. you can do everything, people belonging to the same group can read and execute your files and other people can not do anything.
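
You can check this yourself; a quick sketch, where drwxr-x--- corresponds to mode 750:

ls -ld $HOME\n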

Also note the private sub-folder: here you can put files that you want only you, and no one else, to be able to access. Each day we have a job that ensures that all users' private folders still can't be accessed by anyone else, even if the permissions somehow accidentally would change.

"},{"location":"cluster_guides/files/#creating-and-editing-files","title":"Creating and editing files","text":"

Creating and editing files is taught:

  • UPPMAX intro day 1: use the remote desktop environment
  • UPPMAX intro day 1: use the terminal
"},{"location":"cluster_guides/get_data_from_an_expired_project/","title":"How to get data from an expired project?","text":"

There are many UPPMAX cluster frequently asked questions. This page describes how to get the data from an expired project.

","tags":["FAQ","data","expired","project","NAISS","UPPMAX"]},{"location":"cluster_guides/get_data_from_an_expired_project/#casus","title":"Casus","text":"

You've been working on a project for some time, published your results, and the project expires.

Later you realize that there is still some data you need from that project.

How to get the data from an expired project?

","tags":["FAQ","data","expired","project","NAISS","UPPMAX"]},{"location":"cluster_guides/get_data_from_an_expired_project/#solution","title":"Solution","text":"

Contact support

","tags":["FAQ","data","expired","project","NAISS","UPPMAX"]},{"location":"cluster_guides/gorilla/","title":"Gorilla","text":"

Gorilla is a future storage system, using the ceph file system.

"},{"location":"cluster_guides/interactive_more/","title":"How to run interactively on a compute node?","text":"

You may want to run an interactive application on one or several compute nodes. You may want to use one or several compute nodes as a development workbench, interactively. How can this be arranged? The program interactive may be what you are looking for.

The best way to use the command is usually to add as few parameters as possible, because the interactive command tries to find an optimal solution to give you a high queue priority and thus a quick job start. If you have a clear idea about what parameters you need, please specify them, otherwise it might be a good idea to first see what you get with fewer parameters.

The one parameter you must always specify is the project name. Let's assume for this article that your project name is p2010099.

To get one compute core with the proportional amount of RAM, we recommend you to use the most simple command on the login node for the cluster you want to use:

interactive -A p2010099\n

If you need more than one core, or special features on your node, you can specify that to the interactive command, e.g. on milou:

interactive -A p2010099 -n 16 -C fat\n

as if it was an sbatch command. Actually, interactive is implemented partly as an sbatch command and you can use most sbatch flags here. Please note that only a few nodes are fat, so you may have to wait for quite a long time to get your session started.

There are three ways to get a priority boost, and the interactive command knows how to use them all:

Internally using the sbatch flag \"--qos=interact\", that allows a single-node job with a timelimit of up to 12 hours. (Please note that you are not allowed to keep more than one \"--qos=interact\" jobs in the batch system simultaneously, and please note that you can not use this \"priority lane\" when you have oversubscribed your 30 days running core hour allocation.) Internally using the special devel partition, that allows the job to use 1-4 nodes, with a timelimit of up to one hour. (Please note that you are not allowed to keep more than one \"devel\" job in the batch system simultaneously, regardless if they are running or merely queued.) Internally using the sbatch flag \"--qos=short\", that allows the job to use 1-4 nodes, with a timelimit of up to 15 minutes. (Please note that your are not allowed to keep more than two \"short\" jobs in the batch system simultaneously.) If you do not specify any timelimit, the interactive command will give you the maximum timelimit allowed, according to the rules for priority boosts.

In the last example (\"interactive -A p2010099 -n 16 -C fat\"), the interactive command can not use \"priority lane\" 1 above, because it uses more than one node (one node contains eight cores, two nodes contain a total of sixteen cores), and it can not use \"priority lane\" 2 above, because the special devel partition contains no fat nodes, so the interactive command tries to give you a high-priority 15-minute job.

If you also want to run for 15 hours, you may say so, with the command

interactive -A p2010099 -n 16 -C fat -t 15:00:00\n

but no \"priority lane\" can be used, you get your normal queue priority, and you might have to wait for a very long time for your session to start. Please note that you need to keep watch over when the job starts, because you are accounted for all the time from job start even if you are sleeping, and because an allocated and unused node is a waste of expensive resources.

NB. You can not launch an interactive job from another cluster with the flag -M, which otherwise is a common flag for other Slurm commands. You must launch it from a login node of the cluster you want to use.

"},{"location":"cluster_guides/login_node/","title":"Login node","text":"

A login node is the computer where you arrive after logging in to an UPPMAX HPC cluster.

What does that look like?

Here is what it looks like to be on a login node:

A user on a login node, in this case on a Rackham login node called rackham4. The user used an SSH client and is in a console environment.

A user on a login node, in this case on the Bianca login node of his/her virtual cluster. This user logged in to the Bianca remote desktop via the website

A login node is a shared resource. With this diagram you can determine if you are alone on a login node:

flowchart TD\n  question[Are you alone on the login node?]\n  which_cluster[Which cluster?]\n  alone_in_project[Are you alone in this project?]\n  no[No: you share the login node with others]\n  yes[Yes: you have the login node for yourself]\n  question --> which_cluster\n  which_cluster --> |Rackham| no\n  which_cluster --> |Bianca| alone_in_project\n  alone_in_project --> |yes| yes\n  alone_in_project --> |no| no

Decision tree to determine if you are alone on a login node

Because usually you share a login node with others, this is the rule how to behave on a login node:

Only do short and light things on the login node

Examples of short and light things are:

  • Editing files
  • Copying, deleting, moving files
  • Scheduling jobs
  • Starting an interactive session

Examples of heavy things are:

  • Running code with big calculations, use the job scheduler instead
  • Develop code with big calculations line-by-line, use an interactive node instead
flowchart TD\n    UPPMAX(What to run on which node?)\n    operation_type[What type of operation/calculation?]\n    interaction_type[What type of interaction?]\n    login_node(Work on login node)\n    interactive_node(Work on interactive node)\n    calculation_node(Schedule for calculation node)\n\n    UPPMAX-->operation_type\n    operation_type-->|light,short|login_node\n    operation_type-->|heavy,long|interaction_type\n    interaction_type-->|Direct|interactive_node\n    interaction_type-->|Indirect|calculation_node

Decision tree to determine which type of node you should probably work on

I work alone on a Bianca project. Can I use the login node for heavy things?

Yes!

Or, to be more precise: yes, if the login node is powerful enough for your calculations.

For example, when using RStudio on Bianca, it is recommended to use at least two cores (and the login node has only 2 cores).

So, if you can, use the login node. If you need more resources, either use the job scheduler or use an interactive node with more cores than the login node has.

","tags":["login node","node","login"]},{"location":"cluster_guides/login_node_restrictions/","title":"Login node restrictions","text":""},{"location":"cluster_guides/login_transit/","title":"Log in to Transit","text":"

Below is a step-by-step procedure to log in to Transit.

Enjoy a video?

See how to log in to Transit as a video.

"},{"location":"cluster_guides/login_transit/#1-get-within-sunet","title":"1. Get within SUNET","text":"

Get inside the university networks.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

"},{"location":"cluster_guides/login_transit/#2-use-ssh-to-login","title":"2. Use SSH to login","text":"

On your local computer, start a terminal and use ssh to log in to Transit:

ssh [username]@transit.uppmax.uu.se\n

where [username] is your UPPMAX username, for example:

ssh sven@transit.uppmax.uu.se\n

If you haven't set up SSH keys, you will be asked for your UPPMAX password.

If this is your first time on Transit, you will be asked to add it to your list of known hosts. Type yes.

What does that look like?

This is what it looks like when you are asked to add Transit to your list of known hosts.

You are now logged in to Transit!

What does that look like?
sven@sven-N141CU:~/GitHubs/UPPMAX-documentation/docs/cluster_guides$ ssh sven@transit.uppmax.uu.se\nsven@transit.uppmax.uu.se's password: \nLast login: Tue May 14 07:32:22 2024 from vpnpool188-185.anst.uu.se\n\nTransit server\n\nYou can mount bianca wharf with the command\n\nmount_wharf PROJECT [path]\n\nIf you do not give a path the mount will show up as PROJECT in your home\ndirectory.\n\nNote; any chagnes you do to your normal home directory will not persist.\n
"},{"location":"cluster_guides/maja/","title":"Maja","text":"

Maja is an upcoming UPPMAX cluster for sensitive data, taking over from Bianca.

","tags":["Maja","cluster","sensitive data"]},{"location":"cluster_guides/maja/#features-of-maja-compared-to-bianca","title":"Features of Maja, compared to Bianca","text":"

Although we from UPPMAX cannot say exactly how Maja will look, we try to keep her similar to Bianca.

","tags":["Maja","cluster","sensitive data"]},{"location":"cluster_guides/maja/#migration-from-bianca-to-maja","title":"Migration from Bianca to Maja","text":"

As both clusters are UPPMAX clusters, we are probably able to transfer your data from Bianca to Maja.

","tags":["Maja","cluster","sensitive data"]},{"location":"cluster_guides/miarka/","title":"Miarka","text":"

Miarka is a SciLifeLab cluster.

"},{"location":"cluster_guides/module_conflicts/","title":"How can I resolve problems with conflicting modules?","text":"

Sometimes you may experience conflicting modules. An example would be that your program finds an incorrect library. This can be caused by two or more modules providing libraries with the same name.

Since there are a lot of different modules installed at UPPMAX, we cannot test the compatibility of all the modules.

If you get error messages that you think might be because of conflicting modules, you can do the following:

Check what modules you have loaded:

module list\n

If you want to remove one module:

module unload modulename\n

If you want to remove ALL modules:

module purge\n

Then start to load the modules you need, one by one:

module load modulename\n

Until you can run your program without errors.

UPPMAX recommends that you only load as many modules as you need for each program, to minimize the risk of having conflicting modules.

","tags":["module","conflict","conflicts","solve","resolve","fix"]},{"location":"cluster_guides/modules/","title":"Software modules","text":"

Here we show how to use the environment module system.

After describing the background/reasoning why such a system is needed, we show how to work with the module system.

There is a table of commonly used shorthand syntaxes, as well as links to almost all installed software and databases on UPPMAX.

","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#background","title":"Background","text":"

The UPPMAX clusters are shared Linux computers with all the standard Linux tools installed, on which all users should be able to do their work independently and undisturbed.

To ensure this, users cannot modify, upgrade or uninstall software themselves; instead, an environment module system (from now on: 'module system') is used. This allows users to independently use their favorite versions of their favorite software.

Using a module system keeps installed software hidden by default, and users have to explicitly tell their terminal which version of which software they need.

To have new software installed on an UPPMAX cluster, users must explicitly request a version of a piece of software. As of today, there are more than 800 programs and packages, with multiple versions, available on all UPPMAX clusters. Using explicit versions of software is easy to do and improves the reproducibility of the scripts written.

To preserve hard disk space, Bianca also has multiple big databases installed.

Warning

To access bioinformatics tools, load the bioinfo-tools module first.

","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#working-with-the-module-system","title":"Working with the module system","text":"

Info

Command                          Description
module load bioinfo-tools        Load this module first, to find others
module spider                    Search for a module
module spider [module]           Get info about a module, e.g. module spider cowsay
module avail                     Search for a module that is available
module list                      List all activated modules
module load [module]             Load a module, e.g. module load cowsay
module load [module]/[version]   Load a module of a specific version, e.g. module load cowsay/3.03
module help                      Show the help for a module
module unload [module]           Unload the module [module], e.g. module unload cowsay

What is the difference between module spider and module avail?
  • module spider: search for a module, also those that are not available (yet)
  • module avail: search for a module that is available

As an example, take the samtools module, which will always be found by module spider samtools, but will only be found by module avail after a module load bioinfo-tools.

Working with the module system means:

  • searching for a module
  • activating ('loading') a module
  • deactivating ('unloading') a module

This section describes these steps in more details.

The module command is the basic interface to the module system.

To search for a module, use module spider [module], for example module spider cowsay.

Would you like to see a video instead?

Watch the video that shows the use of modules on Bianca here

What is cowsay?

See the UPPMAX page on cowsay

What is R?

R is the module for the R programming language. R is a free and open-source programming language, commonly used in data analysis and visualization.

What does the output of module spider R look like?
$ module spider R\n\n-------------------------------------------\n  R:\n-------------------------------------------\n     Versions:\n        R/3.0.2\n        R/3.2.3\n        R/3.3.2\n        R/3.4.0\n        R/3.4.3\n        R/3.5.0\n        R/3.5.2\n        R/3.6.0\n        R/3.6.1\n        R/4.0.0\n        R/4.0.4\n        R/4.1.1\n        R/4.2.1\n     Other possible modules matches:\n        454-dataprocessing  ADMIXTURE  ANTLR  ARCS  ARC_assembler  ARPACK-NG  ART  AdapterRemoval  AlienTrimmer  Amber  AnchorWave  Arlequin  Armadillo  ArrowGrid  Bamsurgeon  BclConverter  BioBakery  BioBakery_data  ...\n\n-------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*R.*'\n\n-------------------------------------------\n  For detailed information about a specific \"R\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider R/4.2.1\n-------------------------------------------\n
What is samtools?

samtools is the module for SAMtools. From wikipedia:

SAMtools is a set of utilities for interacting with and post-processing short DNA sequence read alignments in the SAM (Sequence Alignment/Map), BAM (Binary Alignment/Map) and CRAM formats

What does the output of module spider samtools look like?
$ module spider samtools\n\n-------------------------------------------\n  samtools:\n-------------------------------------------\n     Versions:\n        samtools/0.1.12-10\n        samtools/0.1.19\n        samtools/1.1\n        samtools/1.2\n        samtools/1.3\n        samtools/1.4\n        samtools/1.5_debug\n        samtools/1.5\n        samtools/1.6\n        samtools/1.8\n        samtools/1.9\n        samtools/1.10\n        samtools/1.12\n        samtools/1.14\n        samtools/1.16\n        samtools/1.17\n     Other possible modules matches:\n        SAMtools\n\n-------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*samtools.*'\n\n-------------------------------------------\n  For detailed information about a specific \"samtools\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider samtools/1.17\n-------------------------------------------\n
What does the output of module spider samtools/1.17 look like?
$ module spider samtools/1.17\n\n-------------------------------------------\n  samtools: samtools/1.17\n-------------------------------------------\n\n    You will need to load all module(s) on any one of the lines below before the \"samtools/1.17\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n        samtools - use samtools 1.17\n\n        Version 1.17\n

This reminds us that we need to load the bioinfo-tools module to be able to load samtools/1.17. Again, this is required (just once) before loading bioinformatics software.

If there is an exact match, that module is reported first. For the module shown, the available versions are also reported.

Do module load bioinfo-tools first

When working with modules, do module load bioinfo-tools first

What to do when you cannot find a module

Run module load bioinfo-tools.

This will allow other modules to be found.

What to do when module load gives an 'Lmod has detected the following error: These module(s) or extension(s) exist but cannot be loaded as requested' error?

Ouch, now it is time to try out many things.

Do not hesitate to contact support so that you can spend time on your research and we figure this out :-)

To load a module, use module load [module], for example module load cowsay. This will load the default version of that module, which is almost always the latest version. Loading a module always results in a helpful message (such as confirmation that it worked); note that this is not general help for using the tool itself.

How can I see which modules I've loaded?

Use the command module list.

What does the output of module list look like?
$ module list\n\nCurrently Loaded Modules:\n  1) uppmax   2) bioinfo-tools   3) samtools/1.17\n

In this example case, we can see that the modules bioinfo-tools and samtools version 1.17 are loaded.

Getting help on a module

Run module help [module], e.g. module help cowsay to get the general help on a module

For reproducible research, however, it is good practice to load a specific version. The information given by module spider contains the versions of the module. For example, to load the samtools/1.17 module, do module load samtools/1.17.

What does the output of module load GATK/4.3.0.0 look like?
$ module load GATK/4.3.0.0\nNote that all versions of GATK starting with 4.0.8.0 use a different wrapper\nscript (gatk) than previous versions of GATK.  You might need to update your\njobs accordingly.\n\nThe complete GATK resource bundle is in /sw/data/GATK\n\nSee 'module help GATK/4.3.0.0' for information on activating the GATK Conda\nenvironment for using DetermineGermlineContigPloidy and similar other tools.\n

This message references the command module help GATK/4.3.0.0 for additional help with this module.

Huh, module load samtools/1.17 gives an error?

If you do module load samtools/1.17 without doing module load bioinfo-tools first, you'll get the error:

$ module load samtools/1.17\nLmod has detected the following error:  These module(s) or\nextension(s) exist but cannot be loaded as requested: \"samtools/1.17\"\n   Try: \"module spider samtools/1.17\" to see how to load the module(s).\n

The solution is to do module load bioinfo-tools first.

To see which modules are loaded, use module list.

What does the output of module list look like?
$ module list\n\nCurrently Loaded Modules:\n  1) uppmax   2) bioinfo-tools   3) samtools/1.17   4) java/sun_jdk1.8.0_151   5) GATK/4.3.0.0\n

Modules can also be unloaded, which also unloads their prerequisites.

To see a module-specific help, use module help [module] (e.g. module help cowsay).

What does the output of module help GATK/4.3.0.0 look like?
$ module help GATK/4.3.0.0\n\n-------------- Module Specific Help for \"GATK/4.3.0.0\" ---------------\nGATK - use GATK 4.3.0.0\nVersion 4.3.0.0\n\n**GATK 4.3.0.0**\n\nUsage:\n\n    gatk --help     for general options, including how to pass java options\n\n    gatk --list     to list available tools\n\n    gatk ToolName -OPTION1 value1 -OPTION2 value2 ...\n                  to run a specific tool, e.g., HaplotypeCaller, GenotypeGVCFs, ...\n\nFor more help getting started, see\n\n    https://software.broadinstitute.org/gatk/documentation/article.php?id=9881\n\n...\n

To unload a module, do module unload [module] (e.g. module unload cowsay). This will also unload modules that depend on the unloaded one. For example, module unload bioinfo-tools will unload all bioinformatics tools.

","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#using-modules-in-an-executable-script","title":"Using modules in an executable script","text":"

Using modules in an executable script is straightforward: just load the module needed before using the software in that module.

For example, this is a valid bash script:

#!/bin/bash\nmodule load cowsay/3.03\ncowsay hello\n

When using a bioinformatics tool such as samtools version 1.17, one needs to first load bioinfo-tools:

#!/bin/bash\nmodule load bioinfo-tools\nmodule load samtools/1.17\n
","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#common-shorthand-syntaxes","title":"Common shorthand syntaxes","text":"Full command Shorthand syntax module - module avail ml av module spider ml spider module load ml module list ml module unload [module] ml -[module]","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#links","title":"Links","text":"
  • Almost all installed software on UPPMAX
  • Almost all installed databases on UPPMAX
  • Wikipedia page on environment modules
  • lmod homepage
","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#extra-materials","title":"Extra materials","text":"","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#about-module-avail","title":"About module avail","text":"

Why here?

As far as I can see, there is no use case for module avail.

module avail lists all modules that are immediately available, or searches for a specific available module:

  • module avail
  • module avail *tool*

This command is not so smart, though, especially when searching for a specific tool, or a bioinformatics tool. It only reports modules that are immediately available.

module avail R\n

outputs everything that has an r in the name... not useful.

$ module avail samtools\nNo module(s) or extension(s) found!\nUse \"module spider\" to find all possible modules and extensions.\nUse \"module keyword key1 key2 ...\" to search for all possible modules matching any of the \"keys\".\n
","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/modules/#conflicting-modules","title":"Conflicting modules","text":"

Sometimes some tools cannot be run together, that is, a tool may stop working when another module is loaded. Read about this on the page:

  • Conflicting modules
","tags":["module","modules","software module","software modules","lmod"]},{"location":"cluster_guides/nodes_own_disk/","title":"How to use the nodes' own disk","text":"","tags":["node","disk","disc","SNIC_TMP","scratch"]},{"location":"cluster_guides/nodes_own_disk/#short-version","title":"Short version","text":"

When possible, copy the files you want to use in the analysis to $SNIC_TMP at the start of the job, and store all output there as well. The last thing you do in the job is to copy the files you want to keep back to your project directory.

","tags":["node","disk","disc","SNIC_TMP","scratch"]},{"location":"cluster_guides/nodes_own_disk/#long-version","title":"Long version","text":"

Parallel network file systems are very fast when accessed from many nodes, but can nevertheless become a bottleneck. For instance, if many jobs on a single node are doing many file operations, those jobs may be fighting each other and degrading performance. Additionally, the metadata server on these kinds of file systems can be overburdened if very large numbers of files are created and/or removed.

For this reason, jobs that perform a lot of file accesses, especially on temporary files, should use the compute node's local hard drive. If you do, then any slow-down due to file I/O is limited to the node(s) on which these jobs are running.

The hard drive of the node is located at /scratch, and each job that runs on a node automatically gets a folder created with the same name as the job ID, /scratch/<jobid>. This folder name is also stored in the environment variable $SNIC_TMP for ease of use. The idea is that you copy files that you will be reading randomly, such as indices and databases (but not files of reads), to $SNIC_TMP first thing in the job. Files that you read as a stream from beginning to end, like files of reads, should remain in project storage and be read from there. You then run your analysis and have all the output files written to $SNIC_TMP as well. After the analysis is done, you copy back all the output files you want to keep to your project storage folder. Everything in /scratch/<jobid> will be deleted as soon as the job is finished, and you have no hope of recovering it after the job is completed.

An example would be a script that runs bwa to align reads. Usually such scripts look something like this:

#!/bin/bash -l\n#SBATCH -A snic2022-X-YYY\n#SBATCH -t 12:00:00\n#SBATCH -p core\n#SBATCH -n 20\n\n# load modules\nmodule load bioinfo-tools bwa/0.7.17 samtools/1.14\n\n# run the alignment and convert its output directly to\n# a sorted bam format\nbwa mem -t 16 /proj/snic2022-X-YYY/nobackup/ref/hg19.fa /proj/snic2022-X-YYY/rawdata/sample.fq.gz | samtools sort -@ 4 -m 10G -O bam - > /proj/snic2022-X-YYY/nobackup/results/sample.bam\n

The steps to be added are (1) copy the index to $SNIC_TMP, but not the reads; (2) adjust your script to read the index from $SNIC_TMP; and (3) copy the results back to project storage once the alignment is done.

#!/bin/bash -l\n#SBATCH -A snic2022-X-YYY\n#SBATCH -t 12:00:00\n#SBATCH -p core\n#SBATCH -n 20\n\n# load modules\nmodule load bioinfo-tools bwa/0.7.17 samtools/1.14\n\n# copy the index files used in the analysis to $SNIC_TMP\ncp /proj/snic2022-X-YYY/nobackup/ref/hg19.fa* $SNIC_TMP/\n\n# go to the $SNIC_TMP folder to make sure any temporary files\n# are created there as well\ncd $SNIC_TMP\n\n# run the alignment using the index in $SNIC_TMP and the reads\n# from project storage. Write the sorted BAM containing\n# alignments directly to $SNIC_TMP. Use 16 threads for\n# alignment and 4 threads for sorting and compression, and\n# 20 GB RAM per sorting thread. These values are appropriate for a\n# full standard rackham node.\nbwa mem -t 16 $SNIC_TMP/hg19.fa /proj/snic2022-X-YYY/rawdata/sample.fq.gz | samtools sort -@ 4 -m 20G -O bam - > $SNIC_TMP/sample.bam\n\n# copy the results back to the network file system\ncp $SNIC_TMP/sample.bam /proj/snic2022-X-YYY/nobackup/results/\n

It's not harder than that. This way, the index files are copied to $SNIC_TMP in a single operation, which is much less straining for the file system than many small random reads/writes. The network file system is used when gathering reads for alignment, and streaming reads are easy for that file system. When the alignment is finished, the result is copied back to the project directory so that it can be used in other analyses.

One problem that can occur is that your input files and results are too large for the node's hard drive. The drive is 2 TiB on Rackham and 4 TiB on Bianca, so it is unusual for it to be too small for the results of such analyses. If you run into this problem, please email UPPMAX at support@uppmax.uu.se and we will look into it.

","tags":["node","disk","disc","SNIC_TMP","scratch"]},{"location":"cluster_guides/optimizing_jobs/","title":"Optimizing jobs","text":"

The UPPMAX clusters use the Slurm job scheduler. The UPPMAX pages about Slurm here describe how to start a job. However, a job may not run optimally, i.e. it may reserve CPU power and/or memory that it does not use.

This page describes how to optimize your Slurm jobs.

"},{"location":"cluster_guides/optimizing_jobs/#commands","title":"Commands","text":"

You will probably have good use of the following commands:

  • finishedjobinfo - information about finished jobs
  • jobinfo - information about running and waiting jobs
  • jobstats - see CPU and memory use of a finished job in a plot
  • projinfo - the CPU hour usage of your projects
  • projmembers - your project memberships
  • projsummary [project id] - summarizes some useful information about projects
  • uquota - your file system usage

Working on Snowy? Use -M snowy

For Slurm commands and for commands like projinfo, jobinfo and finishedjobinfo, you may use the -M flag to ask for the answer for a system that you are not logged in to. For example, when logged in to Rackham, you may ask about the current core hour usage on Snowy with the command projinfo -M snowy

"},{"location":"cluster_guides/optimizing_jobs/#check-you-storage-with-uquota","title":"Check you storage with uquota","text":""},{"location":"cluster_guides/optimizing_jobs/#check-your-cpu-hour-usage-with-projinfo","title":"Check your CPU hour usage with projinfo","text":""},{"location":"cluster_guides/pelle/","title":"Pelle","text":"Why such a bad image?

Copyright. This is one of the few images that shows Pelle Svansl\u00f6s with a Creative Commons license.

Pelle is an upcoming general-purpose UPPMAX cluster, paid by Uppsala University.

Uppsala users of Rackham will be moved to Pelle by UPPMAX after applying to a Pelle project.

Non-Uppsala users of Rackham can move their data to Dardel, see the Rackham to Dardel migration guide.

Status

Pelle is in the process of being set up. Here is the current status of Pelle.

The delivery of the new UPPMAX system, Pelle, faces continued delays. UPPMAX now has a plan for provisioning Rackham to Uppsala University researchers until Pelle is ready.

We have started to approve proposals submitted to the UPPMAX Local 2025 round. This will take a little time, but when your project is approved we ask you to examine the decision email closely and let us know if we\u2019re missing any projects for data migration.

New projects with Pelle and Gorilla resources will not be able to use Pelle or Gorilla until the systems are operational. The projects will have those resources represented in SUPR anyway.

Approved projects will receive a Rackham allocation, in addition to the resources you asked for, which will let you log in and submit jobs to Rackham.

Access to storage directories on Crex belonging to UU-affiliated projects will not expire, so you will have continued access to data in UU-affiliated projects on Crex until Pelle is ready. Therefore, most approved projects will not receive a Crex allocation. If we cannot identify existing storage allocations for you, we will create a Crex allocation for your storage needs.

When Pelle is operational, we will move data from the NAISS projects you\u2019ve mentioned in the proposals to the UPPMAX Local projects for you. Then, we will switch off Rackham and Crex and let you use Pelle and Gorilla.

The \u201cUPPMAX for Education\u201d service on Snowy will, for now, continue as normal. At some point, we will move the GPUs from Snowy to newer hardware. At the same time, we will add Rackham and/or Pelle allocations to projects and shut down Snowy. We will inform you before we do this.

","tags":["Pelle","cluster","general-purpose"]},{"location":"cluster_guides/pelle/#how-to-apply-to-a-pelle-project","title":"How to apply to a Pelle project","text":"

See how to apply to a Pelle project.

","tags":["Pelle","cluster","general-purpose"]},{"location":"cluster_guides/pelle/#features-of-pelle-compared-to-rackham","title":"Features of Pelle, compared to Rackham","text":"

Although we at UPPMAX cannot say exactly what Pelle will look like, we try to make Pelle as similar to Rackham as possible.

","tags":["Pelle","cluster","general-purpose"]},{"location":"cluster_guides/pelle/#migration-from-rackham-to-pelle","title":"Migration from Rackham to Pelle","text":"

As both clusters are UPPMAX clusters, we will transfer your data from Rackham to Pelle. Users will have to apply to a Pelle project.

","tags":["Pelle","cluster","general-purpose"]},{"location":"cluster_guides/project_management/","title":"Manage you projects","text":""},{"location":"cluster_guides/project_management/#check-the-cpu-hours-of-your-projects","title":"Check the CPU hours of your project(s)","text":""},{"location":"cluster_guides/project_management/#on-an-uppmax-cluster","title":"On an UPPMAX cluster","text":"

To get an overview of how much of your project allocation has been used, please use the projinfo command. Use the command projinfo -h to get details on usage.

Usage: projinfo [-OPTIONS [-MORE_OPTIONS]] [--] [PROGRAM_ARG1 ...]\n\nThe following single-character options are accepted:\n        With arguments: -s -e -M\n        Boolean (without arguments): -h -q -v -m -y\n
  • With no flags given, projinfo will tell you your usage during the current month.

  • Usage in project \"testproj\" during the current year: projinfo -y testproj

  • Usage in project testproj during the specified two months: projinfo -s 2010-02 -e 2010-03 testproj

  • Usage in your projects today, until the moment you run the command: projinfo -s today

"},{"location":"cluster_guides/project_management/#supr","title":"SUPR","text":"

Log in to SUPR and view your projects there. You can also get information about usage levels, storage levels, membership, etc.

"},{"location":"cluster_guides/project_management/#storage","title":"Storage","text":"
  • Disk storage guide
  • Backup
"},{"location":"cluster_guides/project_management/#display-storage-quota","title":"Display storage quota","text":"
  • Display your project quota with the command uquota:
uquota\n
  • Display the disk quota on a more detailed level
"},{"location":"cluster_guides/project_management/#other","title":"Other","text":"

How do I specify that I do not need my large datasets to be backed up?

If you create a folder named nobackup, inside any folder, then all data stored inside this folder will not be backed-up.

Simply move (mv or rsync) your data into a folder with the proper name.

Also note that all projects have a separate nobackup folder under the /proj/xyz/ hierarchy (and also under the /proj/xyz/private/ hierarchy) with a separate quota limit from the ordinary backed-up project folder. You can read more about this in our disk storage guide.
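
A minimal sketch, assuming a project folder /proj/xyz and a hypothetical data folder big_data:

# anything inside a folder named 'nobackup' is excluded from backup\nmkdir -p /proj/xyz/nobackup/big_data\nmv /proj/xyz/big_data/* /proj/xyz/nobackup/big_data/\n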

What is this 'glob' folder in my home folder?
  • The glob directory found in your home has been deprecated since early 2017.
  • It is now a normal directory that shares your default 32 GB home directory quota.
  • The glob directory remains so as not to interfere with scripts that might reference ~/glob in the source code.

  • Historically, the glob directory was the main storage area for user data.

    • It was shared by all nodes.
    • The directory was used for files needed by all job instances and could house files exceeding the quota of the home directory.
    • Job input and output files were (and can still be) stored here.
"},{"location":"cluster_guides/project_management/#members","title":"Members","text":"

Check the current project members with:

projmembers <project-name>\n

To check which members presently belong to a certain (Linux) group, run:

getent group <project name>\n
  • You can also check in SUPR - Swedish User and Project Repository
"},{"location":"cluster_guides/rackham/","title":"Rackham","text":"

Rackham is one of the UPPMAX clusters; it is a general-purpose cluster.

Consider migrating to Dardel now

The Rackham cluster will be decommissioned at the end of 2024 so all projects have to migrate their data and calculations to other resources. The plan from NAISS is that all Rackham users will move to the Dardel cluster at PDC.

See the page on file transfer to Dardel here.

In the near future, Rackham will be replaced by Pelle and will only be accessible to Uppsala researchers.

  • Rackham's name
  • Rackham's design
  • Rackham's hardware
  • Log in
  • Starting an interactive node
  • File transfer
    • using a graphical program
    • using SCP
    • using SFTP
  • The module system
  • IDEs
    • Jupyter
    • RStudio
    • VSCode
    • VSCodium
  • Isolated environments
    • venv
  • Run webexport
  • Best practices
  • Rackham installation guides
  • Rackham workshops
","tags":["Rackham","cluster","general-purpose"]},{"location":"cluster_guides/rackham_file_transfer_using_gui/","title":"File transfer to/from Rackham using a graphical tool","text":"

Data transfer to/from Rackham using a graphical tool is one of the ways to transfer files to/from Rackham

What are the other ways?

Other ways to transfer data to/from Rackham are described here

There are many graphical tools that can do this:

  • File transfer to/from Rackham using FileZilla

","tags":["Rackham","File transfer","Graphical tool","Visual tool"]},{"location":"cluster_guides/rackham_file_transfer_using_transit/","title":"Data transfer to/from Rackham using Transit","text":"

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham

What is Transit?

See the page about the UPPMAX Transit server.

What are the other ways?

Other ways to transfer data to/from Rackham are described here

This page assumes your files are 'posted' to Transit. Transit is a service, not a file server.

How to transfer files to/from Transit?

See here

Transferring files between Rackham and Transit can also be done in multiple ways:

  • Using SCP
  • Using SFTP
"},{"location":"cluster_guides/rackham_file_transfer_using_transit/#overview","title":"Overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n    classDef transit_node fill:#fff,color:#000,stroke:#fff\n\n    subgraph sub_inside[SUNET]\n      direction LR\n      user(User)\n      subgraph sub_transit_env[Transit]\n        transit_login(Transit login):::calculation_node\n        files_on_transit(Files posted to Transit):::transit_node\n      end\n      subgraph sub_rackham_shared_env[Rackham]\n          files_in_rackham_home(Files in Rackham home folder):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#ccc,color:#000,stroke:#000\n    style sub_transit_env fill:#cfc,color:#000,stroke:#000\n    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000\n\n    user --> |logs in |transit_login\n\n    transit_login --> |can use|files_on_transit\n    %% user_local_files <--> |graphical tool|files_in_rackham_home\n    %% user_local_files <--> |SCP|files_in_rackham_home\n    files_on_transit <==> |transfer|files_in_rackham_home

Overview of file transfer on Rackham. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

"},{"location":"cluster_guides/rackham_modules/","title":"Working with environment modules on Rackham","text":"

Rackham is a shared Linux computer with all the standard Linux tools installed, on which all users should be able to do their work independently and undisturbed.

Because this is the same for nearly all UPPMAX clusters, there is a general page on modules here

"},{"location":"cluster_guides/rackhams_design/","title":"Rackham's design","text":"

Rackham is a (general-purpose) high-performance computing (HPC) cluster.

What is an HPC cluster?

What an HPC cluster is, is described here.

Or: Rackham is a group of computers that can effectively run many calculations, as requested by multiple people, at the same time. Rackham runs the Linux operating system and all users need some basic Linux knowledge to use Rackham.

Using Linux

Using Linux (and especially the so-called command-line/terminal) is essential to use Rackham. Learning the essential Linux commands is described here.

"},{"location":"cluster_guides/rackhams_name/","title":"Rackham's name","text":"

Rackham, like all UPPMAX clusters, is named after a Tintin character, in this case after Red Rackham.

What are the UPPMAX clusters?

All UPPMAX clusters can be found here.

"},{"location":"cluster_guides/runtime_tips/","title":"Runtime tips","text":""},{"location":"cluster_guides/runtime_tips/#general","title":"General","text":"How can I run X11 applications inside GNU screen?

If I log in to the login node with ssh -XA user@hostname, as one is supposed to when wanting to run X applications, and then try to start an X application inside a screen session, why does this not work?

(This applies also for trying to do PNG output in R, since it depends on X11)

When starting a screen session, your DISPLAY environment variable can sometimes change from the one that you had when you logged in.

To solve this problem, you simply have to set the DISPLAY variable inside the screen session, to the same value that you have outside it.

So, outside the screen session, do:

echo $DISPLAY\n

You might see something like:

localhost:45.0\n

Then, inside your screen session, set your DISPLAY env variable to that same value using the export command, like so:

export DISPLAY=localhost:45.0\n

(NOTE: The actual number above might be different for you, and should be changed accordingly!)
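
To avoid copying the value by hand, you can write it to a file before starting screen and source that file inside the session. A sketch, assuming bash and a hypothetical file name ~/.last_display:

# outside screen:\necho \"export DISPLAY=$DISPLAY\" > ~/.last_display\n# inside the screen session:\nsource ~/.last_display\n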

I want my program to send data to both stdout and to a file but nothing comes until the program ends

There is a program called unbuffer. You could try using it like this (tee takes care of sending output both to stdout and to a file):

unbuffer your_program |tee some_output_file\n
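
If unbuffer is not available, stdbuf from GNU coreutils can often achieve the same by forcing line-buffered output (a sketch; whether it helps depends on how your program buffers internally):

stdbuf -oL ./your_program | tee some_output_file\n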
My program suddenly seems to stop executing but it does not crash; the process is still alive. What is wrong?
  • This may happen if your executable binary file is deleted while the program is running.
  • For example, if you recompile your program the previous executable file is deleted, which can cause running instances of the program to crash with \"Bus error\".
  • The recommended solution is that if you need to recompile or reinstall while the program is running, create a copy of the executable file and execute the copy.
  • Then, the original executable file can be safely deleted.
  • Alternatively, rename the currently executing file to something new and unique (using the mv command) before recompiling/reinstalling your program. A sketch of both approaches follows below.
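
Here, myprogram is a hypothetical binary name and make stands in for whatever rebuild step you use:

# run a copy, so the original can be rebuilt or deleted safely\ncp myprogram myprogram.run\n./myprogram.run &\n\n# or: rename the running binary before recompiling\nmv myprogram myprogram.old\nmake\n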
My program crashes with the error message 'Bus error'. Why?
  • This may happen if your executable binary file is deleted while the program is running.
  • For example, if you recompile your program the previous executable file is deleted, which can cause running instances of the program to crash with \"Bus error\".
  • The recommended solution is that if you need to recompile or reinstall while the program is running, create a copy of the executable file and execute the copy.
  • Then, the original executable file can be safely deleted.
  • Alternatively, rename the currently executing file to something new and unique (using the mv command) before recompiling/reinstalling your program.
I have strange problems with my text files / scripts when they have been copied from other computers


One reason is that copy-and-paste sometimes doesn't work. Rich text files and PDFs often replace symbols like quotes and white space with different symbols to improve readability, and copying from sources like these is generally not a good idea.

Another possible reason is that lines of text files are terminated differently on UNIX/Windows/Mac. Read on for information on how to solve this:

This might happen because your file was created, for instance, on a Windows computer and later copied to UPPMAX Linux machines. Text files have different line terminations on, for instance, Windows and Linux/Unix. If this is an ordinary text file, you can test this by using the \"file\" command, like this:

$ file myfile\nmyfile: ASCII text, with CRLF line terminators\n

CRLF terminators tell you that each line of the file ends with both a carriage return and a line feed, as on Windows. On all UPPMAX systems, the file can simply be converted to UNIX-style text files using the \"dos2unix\" command:

$ dos2unix myfile\ndos2unix: converting file myfile to UNIX format ...\n

Checking the file again with the \"file\" command reveals that it now has ordinary UNIX line terminators (only LF):

$ file myfile\nmyfile: ASCII text\n

Similarly, a file from a Mac can be converted using the \"mac2unix\" command.

If a shell script is behaving strangely, it can be due to the same problem. Trying to execute a program where the end of line marker is wrong might result in an error message such as the one below:

$ cat myscript.sh\n#!/bin/sh\n./program\n$ ./myscript.sh\n: No such file or directory\n

The \"file\" command does not work in this case as it simply tells us that the script is a \"Bourne shell script text executable\". Opening the script using \"vi\" shows at the bottom of the screen \"myscript.sh\" [dos] 2L, 22C. The \"[dos]\" is a sure marker of the same problem. Opening the same file in emacs reveals the same thing (-uu-(DOS)---F1 myscript.sh). Convert the script to unix-format using the \"dos2unix\" command as described above. An alternative is to copy the file and use the \"dos2unix\" command on the copy and compare the file sizes using \"ls -l\":

$ ls -l testme.sh\n-rwxr-xr-x  1 daniels uppmax_staff 22 Dec 15 10:53 testme.sh\n$ dos2unix testme.sh\ndos2unix: converting file testme.sh to UNIX format ...\n$ ls -l testme.sh\n-rwxr-xr-x  1 daniels uppmax_staff 20 Dec 15 10:54 testme.sh\n

Note that the file size went from 22 bytes to 20, reflecting that the two CR bytes (one at the end of each of the two lines) were removed.

How to run interactively on a compute node?
  • Start an interactive node
  • More about interactive
I got problems running Perl on UPPMAX with messages about 'locale'
  • Edit your .bashrc file (located in your home folder on an UPPMAX cluster, like Rackham) and add the following lines:
export LC_CTYPE=en_US.UTF-8 \nexport LC_ALL=en_US.UTF-8 \n
  • ... then restart your terminal, or run, when located in your home folder:
source .bashrc\n
"},{"location":"cluster_guides/runtime_tips/#related-to-batch-jobs","title":"Related to Batch jobs","text":"Looking at \"jobinfo\" output, PRIORITY and REASON for my waiting jobs change over time. Please explain what is going on!

What do the fields PRIORITY and REASON mean in \"jobinfo\" output?

How do I use the modules in batch jobs?
  • In order to make running installed programs easier you should use the module command.
  • The different modules that are installed set the correct environment variables needed for the programs to run, like PATH, LD_LIBRARY_PATH and MANPATH. To see what modules are available, type module avail. To see what modules you have loaded, type module list.

  • Note: for the batch system Slurm to work with modules, you must have

#!/bin/bash -l\n

in your submit script.

  • For more information, read the module system guide
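
Putting this together, a minimal sketch of a batch script that uses modules (the project code and module versions are placeholders):

#!/bin/bash -l\n#SBATCH -A [project_code]\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 00:10:00\n\n# load the modules the program needs\nmodule load bioinfo-tools samtools/1.14\n\nsamtools --version\n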
What is causing the sbatch script error 'Unknown shell type load'?
  • If you're getting the error message
init.c(379):ERROR:109: Unknown shell type load\n

when running your sbatch script, then your script is probably starting with the line

#!/bin/bash\n

To remedy this you need to make sure that your script starts with

#!/bin/bash -l\n

i.e. notice the trailing \"-l\". This tells bash to load the correct environment settings, which makes the module system usable.

I get slurmstepd: error: _get_pss:ferror() /proc/$$/smaps

Sometimes, this error message occurs in the Slurm output file: slurmstepd: error: _get_pss: ferror() indicates error on file /proc/$$/smaps

This error does not affect the results and can be ignored.

Statistics are collected when a job has finished, including PSS, which is a measure of memory usage. The error message means that when Slurm tries to collect all info to calculate PSS, the file exposing kernel statistics for the process is already gone. This is probably due to the cleaning process being slightly out of sync.

Job statistics based on the PSS value, like how much memory a job has used, might not be reliable. But since this is something that happens after the job has finished, results should not be affected.

How can I see my job's memory usage?
  • Historical information can first of all be found by issuing the command finishedjobinfo -j. That will print out the maximum memory used by your job.

  • If you want more details, we also save some memory information for the job at 5-minute intervals, in a file under /sw/share/slurm/[cluster-name]/uppmax_jobstats/. Notice that this is only stored for 30 days.

  • You can also ask for an e-mail containing the log, when you submit your job with sbatch or start an \"interactive\" session, by adding a \"-C usage_mail\" flag to your command. Two examples:

sbatch -A testproj -p core -n 5 -C usage_mail batchscript1\n

or, if interactive

interactive -A testproj -p node -n 1 -C \"fat&usage_mail\"\n
  • As you see, you have to be careful with the syntax when asking for two features, like \"fat\" and \"usage_mail\", at the same time. The logical AND operator \"&\" combines the flags.

  • If you overdraft the RAM that you asked for, you will probably get an automatic e-mail anyway.

  • If, on the other hand, you want to view your memory consumption in real time then you will have to login to the node in question in another SSH session. (You will probably find a more recently updated memory information file there, named /var/spool/uppmax_jobstats/.)

  • By naively looking at the memory consumption with tools like ps and top you as a user can easily get the wrong impression of the system, as the Linux kernel uses free memory for lots of buffers and caches to speed up other processes (but releases this as soon as applications requests it).

  • If you know that you are the only user running on the node (from requesting a node job, for example), then you could issue the command \"free -g\" instead. That will show you how much memory is used/free by the whole system, exclusive of these caches. Look for the row called \"-/+ buffers/cache\".

  • If you require more detailed live information, then it would probably be best if the tool called smem is used. Download the latest version from http://www.selenic.com/smem/download/ and unpack it in your home directory. Inside you will find an executable Python script, and by executing the command smem -utk you will see your user's memory usage reported in three different ways.

    • USS is the total memory used by the user without shared buffers or caches.
    • RSS is the number reported in \"top\" and \"ps\"; i.e. including ALL shared buffered/cached memory.
    • And then there's also the PSS figure which tries to calculate a proportional memory usage per user for all shared memory buffers and caches (i.e. the figure will fall between USS and RSS).
My job has very low priority! What can be wrong?
  • One reason could be that your project has consumed its allocated hours.

  • Background: Every job is associated with a project.

    • Suppose that you are working for a SNIC project s00101-01 that has been granted 10000 core hours per running 30-day period.
    • At the start of the project, s00101-01 is credited with 10000 hours and jobs that run in that project are given a high priority.
    • All the jobs that finished or were running during the last 30 days are compared with this granted time.
    • If enough jobs have run to consume this amount of hours, the priority is lowered.
    • The more you have overdrafted your granted time, the lower the priority.
  • If you have overdrafted your granted time, it is still possible to run jobs, but you will probably wait longer in the queue.

  • To check status for your projects, run

$ projinfo\n(Counting the number of core hours used since 2010-05-12/00:00:00 until now.)\n\nProject             Used[h]   Current allocation [h/month]\nUser\n-----------------------------------------------------\ns00101-01          72779.48               50000\nsome-user       72779.48\n
  • If there are enough jobs in projects that have not gone over their allocation, jobs associated with an overdrafted project are stuck waiting at the bottom of the jobinfo list until its usage for the last 30 days drops below its allocated budget again.

  • On the other hand, such jobs may be lucky to get some free nodes, so they could run as bonus jobs before this happens.

  • The job queue, which you can see with the jobinfo command, is ordered by job priority. Jobs with a high priority will run first, if they can (depending on the number of free nodes and any special demands on e.g. memory).

  • Job priority is the sum of the following numbers (you may use the sprio command to get exact numbers for individual jobs):

    • A high number (100000 or 130000) if your project is within its allocation and a lower number otherwise. There are different grades of lower numbers, depending on how many times your project is overdrafted. As an example, a 2000 core hour project gets priority 70000 when it has used more than 2000 core hours, gets priority 60000 when it has used more than 4000 core hours, gets priority 50000 when it has used more than 6000 core hours, and so on. The lowest grade gives priority 10000 and does not go down from there.
    • The number of minutes the job has been waiting in queue (for a maximum of 20160 after fourteen days).
    • A job size number, higher for more nodes allocated to your job, for a maximum of 104.
    • A very, very high number for \"short\" jobs, i.e. very short jobs that are not wider than four nodes.
    • If your job priority is zero or one, there are more serious problems, for example that you asked for more resources than the batch system finds on the system.
  • If you ask for a longer run time (TimeLimit) than the maximum on the system, your job will not run. The maximum is currently ten days. If you must run a longer job, submit it with a ten-day runtime and contact UPPMAX support.

"},{"location":"cluster_guides/slurm/","title":"Slurm","text":"

The UPPMAX clusters are a shared resource. To ensure fair use, UPPMAX uses a scheduling system. A scheduling system decides at what time which calculation is done. The software used is called Slurm.

Why not write SLURM?

Indeed, Slurm started as an abbreviation of 'Simple Linux Utility for Resource Management'. However, the Slurm homepage uses 'Slurm' to describe the tool, hence we use Slurm too.

This page describes how to use Slurm in general. See optimizing jobs for how to optimize Slurm jobs. See Slurm troubleshooting for how to fix Slurm errors.

For information specific to clusters, see:

  • Slurm on Bianca
  • Slurm on Rackham
  • Slurm on Snowy
","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#slurm-commands","title":"Slurm Commands","text":"

The Slurm system is accessed using the following commands:

  • interactive - Start an interactive session. This is described in-depth for Bianca and Rackham
  • sbatch - Submit and run a batch job script
  • srun - Typically used inside batch job scripts for running parallel jobs (See examples further down)
  • scancel - Cancel one or more of your jobs.
  • sinfo - View information about Slurm nodes and partitions
flowchart TD\n  login_node(User on login node)\n  interactive_node(User on interactive node)\n  computation_node(Computation node):::calculation_node\n\n  login_node --> |move user, interactive|interactive_node\n  login_node ==> |submit jobs, sbatch|computation_node\n  computation_node -.-> |can become| interactive_node

The different types of nodes an UPPMAX cluster has. The thick edge shows the topic of this page: how to submit jobs to a computation node.

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#job-parameters","title":"Job parameters","text":"

This section describes how to specify a Slurm job:

  • Getting started redirects to the cluster-specific pages
  • Partitions specify the type of job
","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#getting-started","title":"Getting started","text":"

To let Slurm schedule a job, one uses sbatch, like:

sbatch -A [project_code] [script_filename]\n

for example:

sbatch -A sens2017625 my_script.sh\n

Minimal and complete examples of using sbatch are described at the respective cluster guides:

  • Bianca
  • Rackham
  • Snowy
","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#specify-duration-of-the-run","title":"Specify duration of the run","text":"

To let Slurm schedule a job with a certain duration, one uses sbatch with the --time flag, like:

sbatch -A [project_code] --time [duration] [script_filename]\n

for example, for a job of 1 day, 23 hours, 59 minutes and 0 seconds:

sbatch -A sens2017625 --time 1-23:59:00 my_script.sh\n

If the job takes too long, this will result in a timeout error and the job will be aborted.

The maximum duration of the run depends on the cluster you use.

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#partitions","title":"Partitions","text":"

Partitions are a way to tell what type of job you are submitting, e.g. if it needs to reserve a whole node, or part of a node.

To let Slurm schedule a job using a partition, use the --partition (or -p) flag like this:

sbatch -A [project_code] --partition [partition_name] [script_filename]\n

for example:

sbatch -A sens2017625 --partition core my_script.sh\n

These are the partition names and their descriptions:

  • core - Use one or more cores
  • node - Use a full node's set of cores
  • devel - Development job
  • devcore - Development job
","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#the-core-partition","title":"The core partition","text":"

The core partition allows one to use one or more cores.

Here is the minimal use for one core:

sbatch -A [project_code] --partition core [script_filename]\n

For example:

sbatch -A sens2017625 --partition core my_script.sh\n

To specify multiple cores, use --ntasks (or -n) like this:

sbatch -A [project_code] --partition core --ntasks [number_of_cores] [script_filename]\n

For example:

sbatch -A sens2017625 --partition core --ntasks 2 my_script.sh\n

Here, two cores are used.

What is the relation between ntasks and number of cores?

Agreed, the flag --ntasks only indicates the number of tasks. However, by default, the number of tasks per core is set to one, so the number of tasks equals the number of cores. One can make this link explicit by using:

sbatch -A [project_code] --partition core --ntasks [number_of_cores] --ntasks-per-core 1 [script_filename]\n

This is especially important if you might adjust core usage of the job to be something less than a full node.

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#the-node-partition","title":"The node partition","text":"

Whenever -p node is specified, an entire node is used, no matter how many cores are specifically requested with -n [no_of_cores].

For example, some bioinformatics tools show minimal performance increase when using more than 8-10 cores per job; in this case, specify \"-p core -n 8\" to ensure that only 8 cores (less than a single node) are allocated for such a job.
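
As a sketch, such a job could be submitted like this (the project code and script name are placeholders):

sbatch -A [project_code] -p core -n 8 my_script.sh\n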

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#the-devel-partition","title":"The devel partition","text":"","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#the-devcore-partition","title":"The devcore partition","text":"","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#specifying-job-parameters","title":"Specifying job parameters","text":"

Whether you use the UPPMAX clusters interactively or in batch mode, you always have to specify a few things, like number of cores needed, running time etc. These things can be specified in two ways:

Either as flags sent to the different Slurm commands (sbatch, srun, the interactive command, etc.), like so:

sbatch -A p2012999 -p core -n 1 -t 12:00:00 -J some_job_name my_job_script_file.sh\n

or, when using the sbatch command, it can be specified inside the job script file itself, by using special SBATCH comments, for example:

job_script.sh
#!/bin/bash -l\n\n#SBATCH -A p2012999\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 12:00:00\n#SBATCH -J some_job_name\n

If doing this, then one will only need to start the script like so, without any flags:

sbatch job_script.sh\n
How to see how many resources my project has used?

Use projplot.

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#need-more-resources-or-gpu","title":"Need more resources or GPU?","text":"","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#more-memory","title":"More memory","text":"

If you need more memory than the 128 GB available on common nodes, you can allocate larger nodes. Their number and sizes differ among the clusters.

The table below shows the configurations and flags to use.

RAM | Rackham | Snowy | Bianca
256 GB | -C mem256GB | -C mem256GB | -C mem256GB
512 GB | N/A | -C mem512GB | -C mem512GB
1 TB | -C mem1TB | N/A | N/A
2 TB | N/A | -p veryfat -C mem2TB | N/A
4 TB | N/A | -p veryfat -C mem4TB | N/A
","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#gpus","title":"GPUs","text":"
  • Bianca: Nodes with Nvidia A100 40 GB
    • All GPU nodes have at least 256 GB RAM (fat nodes) with 16 CPU cores and 2 GPUs per node
  • Snowy: Nodes with Tesla T4 16 GB
    • The GPU nodes have either 128 or 256 GB memory and one GPU per node

Slurm options:

  • Snowy 128 GB: -M snowy -p node --gres=gpu:1 -t 1:0:1 (Please note that -t has to be more than 1 hr)
  • Snowy 256 GB: -M snowy -p node -C mem256GB --gres=gpu:1 -t 1:0:1
  • Bianca: -C gpu --gres=gpu:1 -t 01:10:00

  • https://slurm.schedmd.com/gres.html#Running_Jobs
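
For example, a sketch of a batch submission to a Snowy GPU node (the project code and script name are placeholders):

sbatch -A [project_code] -M snowy -p node --gres=gpu:1 -t 01:00:01 my_gpu_script.sh\n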

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#the-queue","title":"The queue","text":"Do you want to see a graphical representation of the scheduler?

Slurm scheduler

","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm/#more-about-slurm-on-uppmax","title":"More about Slurm on UPPMAX","text":"","tags":["slurm","Simple Linux Utility for Resource Management"]},{"location":"cluster_guides/slurm_details/","title":"Compute nodes, Slurm and debugging jobs","text":""},{"location":"cluster_guides/slurm_details/#more-slurm-and-other-advanced-uppmax-techniques","title":"More Slurm and other advanced UPPMAX techniques","text":"
  • A closer look at Slurm
  • Using the GPUs
  • Debugging
  • Job efficiency with the jobstats tool
  • Advanced job submission
"},{"location":"cluster_guides/slurm_details/#the-slurm-workload-manager","title":"The Slurm Workload Manager","text":"
  • Free, popular, lightweight
  • Open source: https://slurm.schedmd.com
  • Available at all SNIC centres
  • UPPMAX Slurm user guide
"},{"location":"cluster_guides/slurm_details/#the-queue","title":"The queue","text":"Do you want to see a graphical representation of the scheduler?

Slurm scheduler

"},{"location":"cluster_guides/slurm_details/#more-on-sbatch","title":"More on sbatch","text":"

Recap:

sbatch -A naiss20YY-XX-ZZ -t 10:00 -p core -n 10 my_job.sh\n
  • sbatch - slurm batch
  • -A naiss20YY-XX-ZZ - project name
  • -t 10:00 - max runtime
  • -p core - partition (\"job type\")
  • -n 10 - #cores
  • my_job.sh - job script
"},{"location":"cluster_guides/slurm_details/#more-on-time-limits","title":"More on time limits","text":"
  • Format -t dd-hh:mm:ss
  • Examples and variants on syntax

    • 0-00:10:00 = 00:10:00 = 10:00 = 10
    • 0-12:00:00 = 12:00:00
    • 3-00:00:00 = 3-0
    • 3-12:10:15
"},{"location":"cluster_guides/slurm_details/#job-walltime","title":"Job walltime","text":"When you have no idea how long a program will take to run, what should you book?

A: very long time, e.g. 10-00:00:00

When you have an idea of how long a program would take to run, what should you book?

A: overbook by 50%

"},{"location":"cluster_guides/slurm_details/#more-on-partitions","title":"More on partitions","text":"
  • -p core

    • \u201ccore\u201d is the default partition
    • \u2264 16 cores on Bianca and Snowy
    • \u2264 20 cores on Rackham
    • a script or program written without any thought on parallelism will use 1 core
  • -p node

    • if you wish to book full node(s)
"},{"location":"cluster_guides/slurm_details/#quick-testing","title":"Quick testing","text":"
  • The \u201cdevel\u201d partition
    • max 2 nodes per job
    • up to 1 hour in length
    • only 1 at a time
    • -p devcore, -p devel
Any free nodes in the devel partition? Check status with
  • sinfo -p devel
  • jobinfo -p devel
  • more on these tools later
  • High priority queue for short jobs

    • 4 nodes
    • up to 15 minutes
    • --qos=short
"},{"location":"cluster_guides/slurm_details/#debugging-or-complicated-workflows","title":"Debugging or complicated workflows","text":"
  • Interactive jobs

    • handy for debugging a code or a script by executing it line by line or for using programs with a graphical user interface
    • salloc -n 80 -t 03:00:00 -A sens2023598
    • interactive -n 80 -t 03:00:00 -A sens2023598

    • up to 12 hours

    • useful together with the --begin=<time> flag
    • salloc -A naiss20YY-XX-ZZ --begin=2022-02-17T08:00:00

    • asks for an interactive job that will start at 08:00 on the given date, at the earliest

"},{"location":"cluster_guides/slurm_details/#parameters-in-the-job-script-or-the-command-line","title":"Parameters in the job script or the command line?","text":"
  • Command line parameters override script parameters
  • A typical script may be:
#!/bin/bash\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 24:00:00\n

Just a quick test:

sbatch -A naiss20YY-XX-ZZ -p devcore -t 00:15:00 jobscript.sh\n
Hands-on #1: sbatch/jobinfo
  • login to Bianca
  • find out which projects you\u2019re a member of using projinfo
  • submit a short (10 min) test job; note the job ID
  • find out if there are any free nodes in the devel partition
  • submit a new job to use the devel partition
  • write in the HackMD when you\u2019re done
"},{"location":"cluster_guides/slurm_details/#memory-in-core-or-devcore-jobs","title":"Memory in core or devcore jobs","text":"
  • -n X
  • Bianca: 8GB per core
  • Slurm reports the available memory in the prompt at the start of an interactive job
"},{"location":"cluster_guides/slurm_details/#more-flags","title":"More flags","text":"
  • -J <jobname>
  • email:

    • --mail-type=BEGIN,END,FAIL,TIME_LIMIT_80
    • --mail-user

      • Don\u2019t use. Set your email correctly in SUPR instead.
  • out/err redirection:

    • --output=slurm-%j.out and --error=slurm-%j.err

      • by default, where %j will be replaced by the job ID
        • --output=my.output.file
        • --error=my.error.file
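
A sketch of these flags inside a job script (the file names are examples):

#SBATCH -J some_job_name\n#SBATCH --output=my.output.file\n#SBATCH --error=my.error.file\n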
"},{"location":"cluster_guides/slurm_details/#monitoring-jobs","title":"Monitoring jobs","text":"
  • jobinfo - a wrapper around squeue

    • lists running and pending jobs
    • jobinfo -u username
    • jobinfo -A naiss20YY-XX-ZZ
    • jobinfo -u username --state=running
    • jobinfo -u username --state=pending
  • You may also use the squeue command.

  • bianca_combined_jobinfo (queued jobs of all projects)

"},{"location":"cluster_guides/slurm_details/#monitoring-and-modifying-jobs","title":"Monitoring and modifying jobs","text":"
  • scontrol

    • scontrol show job [jobid]
  • possible to modify the job details after the job has been submitted; some options, like maximum runtime, may be modified (=shortened) even after the job started

    • scontrol update JobID=jobid QOS=short
    • scontrol update JobID=jobid TimeLimit=1-00:00:00
    • scontrol update JobID=jobid NumNodes=10
    • scontrol update JobID=jobid Features=mem1TB
"},{"location":"cluster_guides/slurm_details/#when-a-job-goes-wrong","title":"When a job goes wrong","text":"
  • scancel [jobid]

    • -u username - to cancel all your jobs
    • -t [state] - cancel pending or running jobs
    • -n name - cancel jobs with a given name
    • -i - ask for confirmation
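
For example, a sketch that cancels all of your pending jobs, asking for confirmation for each (username is a placeholder):

scancel -i -u username -t pending\n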
"},{"location":"cluster_guides/slurm_details/#priority","title":"Priority","text":"
  • Roughly:

    • The first job of the day has elevated priority
    • Other normal jobs run in the order of submission (subject to scheduling)
    • Projects exceeding their allocation get successively into the lower priority category
    • Bonus jobs run after the jobs in the higher priority categories
  • In practice:

    • submit early = run early
    • bonus jobs always run eventually, but may need to wait until the night or weekend
    • In detail: jobinfo
Hands-on #2: sbatch/squeue/scancel/scontrol/jobinfo
  • submit a new job; note the job ID
  • check all your running jobs
  • what is the priority of your recently-submitted job?
  • submit a new job to run for 24h; note the job ID
  • modify the name of the job to \u201cwrongjob\u201d
  • cancel your job with name \u201cwrongjob\u201d
"},{"location":"cluster_guides/slurm_details/#determining-job-efficiency","title":"Determining job efficiency","text":"
  • jobstats - custom-made UPPMAX tool
"},{"location":"cluster_guides/slurm_details/#job-efficiency","title":"Job efficiency","text":"
  • jobstats - a tool in the fight for productivity

    • it works only for jobs longer than 5-15 minutes
    • -r jobid - check running jobs
    • -A project - check all recent jobs of a given project
    • -p jobid - produce a CPU and memory usage plot
  • Jobstats user guide

Hands-on #3: jobstats
    • Firstly, find some job IDs from this month, using finishedjobinfo -m username
    • Write down the IDs from some interesting jobs.
    • Generate the images:

    Generate jobstats plots for your jobs

    $ jobstats -p ID1 ID2 ID3\n
  • Look at the images

$ eog *png &\n
  • Which of the plots
    • Show good CPU or memory usage?
    • Indicate that the job requires a fat node?
"},{"location":"cluster_guides/slurm_details/#different-flavours-of-slurm-job-script-examples-and-workflows","title":"Different flavours of Slurm: Job script examples and workflows","text":""},{"location":"cluster_guides/slurm_details/#simple-workflow","title":"Simple workflow","text":"
#!/bin/bash\n#SBATCH -J jobname\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH -p core\n#SBATCH -n 10\n#SBATCH -t 10:00:00\n\nmodule load software/version\nmodule load python/3.9.5\n\n./my-script.sh\n./another-script.sh\n./myprogram.exe\n
"},{"location":"cluster_guides/slurm_details/#job-dependencies","title":"Job dependencies","text":"
  • sbatch jobscript.sh submitted job with jobid1
  • sbatch anotherjobscript.sh submitted job with jobid2
  • --dependency=afterok:jobid1:jobid2 job will only start running after the successful end of jobs jobid1:jobid2
  • very handy for clearly defined workflows
  • You may also use --dependency=afternotok:jobid in case you\u2019d like to resubmit a failed job, OOM (out of memory) for example, to a node with a higher memory: -C mem256GB or -C mem512GB
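
A sketch of building such a chain by capturing the job IDs (sbatch --parsable prints only the job ID; finalstep.sh is a hypothetical script name):

jobid1=$(sbatch --parsable jobscript.sh)\njobid2=$(sbatch --parsable anotherjobscript.sh)\nsbatch --dependency=afterok:$jobid1:$jobid2 finalstep.sh\n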
"},{"location":"cluster_guides/slurm_details/#io-intensive-jobs-snic_tmp","title":"I/O intensive jobs: $SNIC_TMP","text":"
#!/bin/bash\n#SBATCH -J jobname\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 10:00:00\n\nmodule load bioinfotools\nmodule load bwa/0.7.17 samtools/1.14\n\nexport SRCDIR=$HOME/path-to-input\n\ncp $SRCDIR/foo.pl $SRCDIR/bar.txt $SNIC_TMP/.\ncd $SNIC_TMP\n\n./foo.pl bar.txt\n\ncp *.out $SRCDIR/path-to-output/.\n
"},{"location":"cluster_guides/slurm_details/#openmp-or-multi-threaded-job","title":"OpenMP or multi-threaded job","text":"
#!/bin/bash\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH --exclusive\n#SBATCH -p node\n#SBATCH --ntasks-per-node=1\n#SBATCH --cpus-per-task=20\n#SBATCH -t 01:00:00\n\nmodule load uppasd\nexport OMP_NUM_THREADS=20\n\nsd > out.log\n
"},{"location":"cluster_guides/slurm_details/#gpu-nodes","title":"GPU nodes","text":"
  • Bianca: Nodes with Nvidia A100 40 GB
  • Snowy: Nodes with Tesla T4 16 GB
  • All GPU nodes have at least 256 GB RAM (fat nodes) with 16 CPU cores and 2 GPUs per node

  • slurm options:

    • Snowy: -M snowy --gres=gpu:1
    • Bianca: -C gpu --gres=gpu:1 -t 01:10:00
  • https://slurm.schedmd.com/gres.html#Running_Jobs

"},{"location":"cluster_guides/slurm_details/#running-on-several-nodes-mpi-jobs","title":"Running on several nodes: MPI jobs","text":"
#!/bin/bash -l\n#SBATCH -J rsptjob\n#SBATCH --mail-type=FAIL\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH -t 00-07:00:00\n#SBATCH -p node\n#SBATCH -N 4\n### for jobs shorter than 15 min (max 4 nodes):\n###SBATCH --qos=short\n\n\nmodule load RSPt/2021-10-04\nexport RSPT_SCRATCH=$SNIC_TMP\n\nsrun -n 80 rspt\n\nrm -f apts dmft_lock_file e_entropy efgArray.dat.0 efgData.out.0 energy_matrices eparm_last interstitialenergy jacob1 jacob2 locust.* out_last pot_last rspt_fft_wisdom.* runs.a symcof_new\n
"},{"location":"cluster_guides/slurm_details/#job-arrays","title":"Job arrays","text":"
  • Submit many jobs at once with the same or similar parameters
  • Use $SLURM_ARRAY_TASK_ID in the script in order to find the correct path
#!/bin/bash\n#SBATCH -A naiss20YY-XX-ZZ\n#SBATCH -p node\n#SBATCH -N 2\n#SBATCH -t 01:00:00\n#SBATCH -J jobarray\n#SBATCH --array=0-19\n#SBATCH --mail-type=ALL,ARRAY_TASKS\n\n# SLURM_ARRAY_TASK_ID tells the script which iteration to run\necho $SLURM_ARRAY_TASK_ID\n\ncd /pathtomydirectory/dir_$SLURM_ARRAY_TASK_ID/\n\nsrun -n 40 my-program\nenv\n
  • You may use scontrol to modify some of the job arrays.
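
As a sketch, a %N suffix on --array throttles how many array tasks run at once (here at most 4):

sbatch --array=0-19%4 jobscript.sh\n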
"},{"location":"cluster_guides/slurm_details/#snakemake-and-nextflow","title":"Snakemake and Nextflow","text":"
  • Conceptually similar, but with different flavours
  • First define steps, each with an input, an output, and a command that transforms the input into output
  • Then just ask for the desired output and the system will handle the rest
  • Snakemake hackathon (re-occurring event)
  • Nextflow training
Hands-on #4: make it your own
  • use 2 or 3 of the sample job scripts as a starting point for your own job script
  • tweak them so that you run something closer to your research; or just feel free to experiment
  • paste at least one of the examples in the HackMD
  • great if you could add a comment what the job script is about
"},{"location":"cluster_guides/slurm_details/#where-to-go-from-here","title":"Where to go from here?","text":"
  • Code documentation
  • NAISS training newsletter - software-specific training events included
  • https://coderefinery.org/workshops/upcoming/
  • https://nbis.se/training/events.html (bio)
  • Contact support
"},{"location":"cluster_guides/slurm_on_bianca/","title":"Using Slurm on Bianca","text":"

This page describes how to use Slurm on Bianca.

What is Slurm?

See the general page on Slurm here

What is Bianca?

See the general page on Bianca here

See Slurm troubleshooting how to fix Slurm errors.

","tags":["Slurm","Bianca"]},{"location":"cluster_guides/slurm_on_bianca/#sbatch-and-interactive-on-bianca","title":"sbatch (and interactive) on Bianca","text":"

sbatch (and interactive) work the same as on Rackham.

Want to start an interactive job?

See how to start an interactive job on Bianca here

Here it is shown how to submit a job with:

  • command-line Slurm parameters
  • Slurm parameters in the script
","tags":["Slurm","Bianca"]},{"location":"cluster_guides/slurm_on_bianca/#sbatch-a-script-with-command-line-slurm-parameters","title":"sbatch a script with command-line Slurm parameters","text":"

The minimal command to use sbatch with command-line Slurm parameters is:

sbatch -A [project_code] [script_filename]\n

where [project_code] is the project code, and [script_filename] the name of a bash script, for example:

sbatch -A sens2017625 my_script.sh\n
Forgot your Bianca project?

The login page of Bianca's remote desktop environment at https://bianca.uppmax.uu.se is helpful in showing you your Bianca projects:

An example of the Bianca projects for this user

What is in the script file?

The script file my_script.sh is a minimal example script. Such a minimal example script could be:

#!/bin/bash\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm here.

","tags":["Slurm","Bianca"]},{"location":"cluster_guides/slurm_on_bianca/#sbatch-a-script-with-slurm-parameters-in-script","title":"sbatch a script with Slurm parameters in script","text":"

The minimal command to use sbatch with Slurm parameters in the script:

sbatch [script_filename]\n

where [script_filename] the name of a bash script, for example:

sbatch my_script.sh\n

The script must contain at least the following lines:

#SBATCH -A [project_code]\n

where [project_code] is the project code, for example:

#SBATCH -A sens2017625\n
Forgot your Bianca project?

The login page of Bianca's remote desktop environment at https://bianca.uppmax.uu.se is helpful in showing you your Bianca projects:

An example of the Bianca projects for this user

A full example script would be:

#!/bin/bash\n#SBATCH -A sens2017625\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm.

","tags":["Slurm","Bianca"]},{"location":"cluster_guides/slurm_on_bianca/#more-about-slurm-on-bianca","title":"More about slurm on Bianca","text":"","tags":["Slurm","Bianca"]},{"location":"cluster_guides/slurm_on_bianca_details/","title":"Compute nodes, Slurm and debugging jobs on Bianca","text":""},{"location":"cluster_guides/slurm_on_bianca_details/#more-slurm-and-other-advanced-uppmax-techniques","title":"More Slurm and other advanced UPPMAX techniques","text":"
  • A closer look at Slurm
  • Using the GPUs
  • Debugging
  • Job efficiency with the jobstats tool
  • Advanced job submission
"},{"location":"cluster_guides/slurm_on_bianca_details/#the-slurm-workload-manager","title":"The Slurm Workload Manager","text":"
  • Free, popular, lightweight
  • Open source: https://slurm.schedmd.com
  • Available at all SNIC centres
  • UPPMAX Slurm user guide
"},{"location":"cluster_guides/slurm_on_bianca_details/#more-on-sbatch","title":"More on sbatch","text":"

Recap:

sbatch -A sens2023598 -t 10:00 -p core -n 10 my_job.sh\n
  • sbatch - slurm batch
  • -A sens2023598 - project name
  • -t 10:00 - max runtime
  • -p core - partition (\"job type\")
  • -n 10 - #cores
  • my_job.sh - job script
"},{"location":"cluster_guides/slurm_on_bianca_details/#more-on-time-limits","title":"More on time limits","text":"
  • Format -t dd-hh:mm:ss
  • Examples and variants on syntax

    • 0-00:10:00 = 00:10:00 = 10:00 = 10
    • 0-12:00:00 = 12:00:00
    • 3-00:00:00 = 3-0
    • 3-12:10:15
"},{"location":"cluster_guides/slurm_on_bianca_details/#job-walltime","title":"Job walltime","text":"When you have no idea how long a program will take to run, what should you book?

A: very long time, e.g. 10-00:00:00

When you have an idea of how long a program would take to run, what should you book?

A: overbook by 50%

"},{"location":"cluster_guides/slurm_on_bianca_details/#more-on-partitions","title":"More on partitions","text":"
  • -p core

    • \u201ccore\u201d is the default partition
    • \u2264 16 cores on Bianca
    • a script or program written without any thought on parallelism will use 1 core
  • -p node

    • if you wish to book full node(s)
"},{"location":"cluster_guides/slurm_on_bianca_details/#quick-testing","title":"Quick testing","text":"
  • The \u201cdevel\u201d partition
    • max 2 nodes per job
    • up to 1 hour in length
    • only 1 at a time
    • -p devcore, -p devel
Any free nodes in the devel partition? Check status with
  • sinfo -p devel
  • jobinfo -p devel
  • more on these tools later
  • High priority queue for short jobs

    • 4 nodes
    • up to 15 minutes
    • --qos=short
"},{"location":"cluster_guides/slurm_on_bianca_details/#debugging-or-complicated-workflows","title":"Debugging or complicated workflows","text":"
  • Interactive jobs

    • handy for debugging a code or a script by executing it line by line or for using programs with a graphical user interface
    • salloc -n 80 -t 03:00:00 -A sens2023598
    • interactive -n 80 -t 03:00:00 -A sens2023598

    • up to 12 hours

    • useful together with the --begin=<time> flag
    • salloc -A snic2022-22-50 --begin=2022-02-17T08:00:00

    • asks for an interactive job that will start at 08:00 on the given date, at the earliest

"},{"location":"cluster_guides/slurm_on_bianca_details/#parameters-in-the-job-script-or-the-command-line","title":"Parameters in the job script or the command line?","text":"
  • Command line parameters override script parameters
  • A typical script may be:
#!/bin/bash\n#SBATCH -A sens2023598\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 24:00:00\n

Just a quick test:

sbatch -p devcore -t 00:15:00 jobscript.sh\n
Hands-on #1: sbatch/jobinfo
  • login to Bianca
  • find out which projects you\u2019re a member of using projinfo
  • submit a short (10 min) test job; note the job ID
  • find out if there are any free nodes in the devel partition
  • submit a new job to use the devel partition
  • write in the HackMD when you\u2019re done
"},{"location":"cluster_guides/slurm_on_bianca_details/#memory-in-core-or-devcore-jobs","title":"Memory in core or devcore jobs","text":"
  • -n X - book X cores, and with them X shares of the node's memory
  • Bianca: 8 GB per core
  • Slurm reports the available memory in the prompt at the start of an interactive job
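For example, assuming a job needs 32 GB of memory: booking 4 cores gives 4 × 8 GB = 32 GB (the project and script names are placeholders):

sbatch -A sens2023598 -p core -n 4 -t 01:00:00 jobscript.sh\n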
"},{"location":"cluster_guides/slurm_on_bianca_details/#more-flags","title":"More flags","text":"
  • -J <jobname>
  • email:

    • --mail-type=BEGIN,END,FAIL,TIME_LIMIT_80
    • --mail-user

      • Don\u2019t use. Set your email correctly in SUPR instead.
  • out/err redirection:

    • --output=slurm-%j.out and --error=slurm-%j.err

      • by default, where %j will be replaced by the job ID
    • --output=my.output.file

    • --error=my.error.file
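A minimal sketch combining the flags above (the job name, file names and project are placeholders):

sbatch -A sens2023598 -J myjob --mail-type=BEGIN,END,FAIL,TIME_LIMIT_80 --output=my.output.file --error=my.error.file jobscript.sh\n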
"},{"location":"cluster_guides/slurm_on_bianca_details/#monitoring-jobs","title":"Monitoring jobs","text":"
  • jobinfo - a wrapper around squeue

    • lists running and pending jobs
    • jobinfo -u username
    • jobinfo -A sens2023598
    • jobinfo -u username --state=running
    • jobinfo -u username --state=pending
  • You may also use the squeue command.

  • bianca_combined_jobinfo (queued jobs of all projects)

"},{"location":"cluster_guides/slurm_on_bianca_details/#monitoring-and-modifying-jobs","title":"Monitoring and modifying jobs","text":"
  • scontrol

    • scontrol show job [jobid]
  • possible to modify the job details after the job has been submitted; some options, like maximum runtime, may be modified (=shortened) even after the job started

    • scontrol update JobID=jobid QOS=short
    • scontrol update JobID=jobid TimeLimit=1-00:00:00
    • scontrol update JobID=jobid NumNodes=10
    • scontrol update JobID=jobid Features=mem1TB
"},{"location":"cluster_guides/slurm_on_bianca_details/#when-a-job-goes-wrong","title":"When a job goes wrong","text":"
  • scancel [jobid]

    • -u username - to cancel all your jobs
    • -t [state] - cancel pending or running jobs
    • -n name - cancel jobs with a given name
    • -i - ask for confirmation
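For example, to be asked for confirmation before cancelling each of your pending jobs (username is a placeholder):

scancel -i -u username -t pending\n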
"},{"location":"cluster_guides/slurm_on_bianca_details/#priority","title":"Priority","text":"
  • Roughly:

    • The first job of the day has elevated priority
    • Other normal jobs run in the order of submission (subject to scheduling)
    • Projects that exceed their allocation are successively moved into lower priority categories
    • Bonus jobs run after the jobs in the higher priority categories
  • In practice:

    • submit early = run early
    • bonus jobs always run eventually, but may need to wait until the night or weekend
    • In detail: jobinfo
Hands-on #2: sbatch/squeue/scancel/scontrol/jobinfo
  • submit a new job; note the job ID
  • check all your running jobs
  • what is the priority of your recently-submitted job?
  • submit a new job to run for 24h; note the job ID
  • modify the name of the job to \u201cwrongjob\u201d
  • cancel your job with name \u201cwrongjob\u201d
"},{"location":"cluster_guides/slurm_on_bianca_details/#determining-job-efficiency","title":"Determining job efficiency","text":"
  • jobstats - custom-made UPPMAX tool
"},{"location":"cluster_guides/slurm_on_bianca_details/#job-efficiency","title":"Job efficiency","text":"
  • jobstats - a tool in the fight for productivity

    • it works only for jobs longer than 5-15 minutes
    • -r jobid - check running jobs
    • -A project - check all recent jobs of a given project
    • -p jobid - produce a CPU and memory usage plot
  • Jobstats user guide

Hands-on #3: jobstats
    • Firstly, find some job IDs from this month
    • Run finishedjobinfo -m username
    • Write down the IDs from some interesting jobs
    • Generate the images:

    Generate jobstats plots for your jobs

    $ jobstats -p ID1 ID2 ID3\n
  • Look at the images

$ eog *png &\n
  • Which of the plots
    • Show good CPU or memory usage?
    • Indicate that the job requires a fat node?
"},{"location":"cluster_guides/slurm_on_bianca_details/#different-flavours-of-slurm-job-script-examples-and-workflows","title":"Different flavours of Slurm: Job script examples and workflows","text":""},{"location":"cluster_guides/slurm_on_bianca_details/#simple-workflow","title":"Simple workflow","text":"
#!/bin/bash\n#SBATCH -J jobname\n#SBATCH -A sens2023598\n#SBATCH -p core\n#SBATCH -n 10\n#SBATCH -t 10:00:00\n\nmodule load software/version\nmodule load python/3.9.5\n\n./my-script.sh\n./another-script.sh\n./myprogram.exe\n
"},{"location":"cluster_guides/slurm_on_bianca_details/#job-dependencies","title":"Job dependencies","text":"
  • sbatch jobscript.sh submitted job with jobid1
  • sbatch anotherjobscript.sh submitted job with jobid2
  • --dependency=afterok:jobid1:jobid2 job will only start running after the successful end of jobs jobid1:jobid2
  • very handy for clearly defined workflows
  • You may also use --dependency=afternotok:jobid in case you'd like to resubmit a failed job, OOM (out of memory) for example, to a node with more memory: -C mem256GB or -C mem512GB
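A minimal sketch of such a workflow, using the --parsable flag of sbatch to capture the job IDs (the script names are placeholders):

jobid1=$(sbatch --parsable jobscript.sh)\njobid2=$(sbatch --parsable anotherjobscript.sh)\n# this job starts only after jobid1 and jobid2 have ended successfully\nsbatch --dependency=afterok:${jobid1}:${jobid2} finaljobscript.sh\n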
"},{"location":"cluster_guides/slurm_on_bianca_details/#io-intensive-jobs-snic_tmp","title":"I/O intensive jobs: $SNIC_TMP","text":"
#!/bin/bash\n#SBATCH -J jobname\n#SBATCH -A sens2023598\n#SBATCH -p core\n#SBATCH -n 1\n#SBATCH -t 10:00:00\n\nmodule load bioinfotools\nmodule load bwa/0.7.17 samtools/1.14\n\nexport SRCDIR=$HOME/path-to-input\n\ncp $SRCDIR/foo.pl $SRCDIR/bar.txt $SNIC_TMP/.\ncd $SNIC_TMP\n\n./foo.pl bar.txt\n\ncp *.out $SRCDIR/path-to-output/.\n
"},{"location":"cluster_guides/slurm_on_bianca_details/#openmp-or-multi-threaded-job","title":"OpenMP or multi-threaded job","text":"
#!/bin/bash\n#SBATCH -A sens2023598\n#SBATCH --exclusive\n#SBATCH -p node\n#SBATCH --ntasks-per-node=1\n#SBATCH --cpus-per-task=20\n#SBATCH -t 01:00:00\n\nmodule load uppasd\nexport OMP_NUM_THREADS=20\n\nsd > out.log\n
"},{"location":"cluster_guides/slurm_on_bianca_details/#gpu-nodes-on-bianca","title":"GPU nodes on Bianca","text":"
  • Nodes with Nvidia A100 40 GB and 80 GB.
  • All GPU nodes have at least 256 GB RAM (fat nodes) with 16 CPU cores and 2 GPUs per node.
  • In order to avoid GPU misuse, a project cannot request more than 7 GPU nodes, in total.
  • SBATCH options:
#SBATCH -C gpu\n#SBATCH --gpus=2            #number of GPUs requested\n#SBATCH --gpus-per-node=2   #number of GPUs per node\n\nnvidia-smi\n
  • https://slurm.schedmd.com/gres.html#Running_Jobs
"},{"location":"cluster_guides/slurm_on_bianca_details/#running-on-several-nodes-mpi-jobs","title":"Running on several nodes: MPI jobs","text":"
#!/bin/bash -l\n#SBATCH -J rsptjob\n#SBATCH --mail-type=FAIL\n#SBATCH -A sens2023598\n#SBATCH -t 00-07:00:00\n#SBATCH -p node\n#SBATCH -N 4\n### for jobs shorter than 15 min (max 4 nodes):\n###SBATCH --qos=short\n\nmodule load RSPt/2021-10-04\nexport RSPT_SCRATCH=$SNIC_TMP\n\nsrun -n 80 rspt\n\nrm -f apts dmft_lock_file e_entropy efgArray.dat.0 efgData.out.0 energy_matrices eparm_last interstitialenergy jacob1 jacob2 locust.* out_last pot_last rspt_fft_wisdom.* runs.a symcof_new\n
"},{"location":"cluster_guides/slurm_on_bianca_details/#job-arrays","title":"Job arrays","text":"
  • Submit many jobs at once with the same or similar parameters
  • Use $SLURM_ARRAY_TASK_ID in the script in order to find the correct path
#!/bin/bash\n#SBATCH -A sens2023598\n#SBATCH -p node\n#SBATCH -N 2\n#SBATCH -t 01:00:00\n#SBATCH -J jobarray\n#SBATCH --array=0-19\n#SBATCH --mail-type=ALL,ARRAY_TASKS\n\n# SLURM_ARRAY_TASK_ID tells the script which iteration to run\necho $SLURM_ARRAY_TASK_ID\n\ncd /pathtomydirectory/dir_$SLURM_ARRAY_TASK_ID/\n\nsrun -n 40 my-program\nenv\n
  • You may use scontrol to modify parts of a job array after submission, as sketched below.
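A minimal sketch, assuming 1234567 is the job ID of a pending job array and that your Slurm version accepts array-task ranges in scontrol:

# shorten the time limit of array tasks 10-19\nscontrol update JobId=1234567_[10-19] TimeLimit=02:00:00\n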
"},{"location":"cluster_guides/slurm_on_bianca_details/#snakemake-and-nextflow","title":"Snakemake and Nextflow","text":"
  • Conceptually similar, but with different flavours
  • First define steps, each with an input, an output, and a command that transforms the input into output
  • Then just ask for the desired output and the system will handle the rest
  • Snakemake hackathon (re-occurring event)
  • Nextflow training
Hands-on #4: make it your own
  • use 2 or 3 of the sample job scripts as a starting point for your own job script
  • tweak them so that you run something closer to your research; or just feel free to experiment
  • paste at least one of the examples in the HackMD
  • great if you could add a comment about what the job script does
"},{"location":"cluster_guides/slurm_on_bianca_details/#where-to-go-from-here","title":"Where to go from here?","text":"
  • Code documentation
  • NAISS training newsletter - software-specific training events included
  • https://coderefinery.org/workshops/upcoming/
  • https://nbis.se/training/events.html (bio)
  • email support@uppmax.uu.se or https://supr.naiss.se/support/
"},{"location":"cluster_guides/slurm_on_rackham/","title":"Using Slurm on Rackham","text":"

This page describes how to use Slurm on Rackham.

What is Slurm?

See the general page on Slurm here

What is Rackham?

See the general page on Rackham here

See Slurm troubleshooting how to fix Slurm errors.

"},{"location":"cluster_guides/slurm_on_rackham/#sbatch-and-interactive-on-rackham","title":"sbatch (and interactive) on Rackham","text":"

sbatch (and interactive) work the same as on other clusters; the only difference is that one needs to specify that one wants to use the Rackham compute nodes.

Want to start an interactive job?

See how to start an interactive job on Rackham here

Here it is shown how to submit a job with:

  • command-line Slurm parameters
  • Slurm parameters in the script
"},{"location":"cluster_guides/slurm_on_rackham/#sbatch-a-script-with-command-line-slurm-parameters","title":"sbatch a script with command-line Slurm parameters","text":"

The minimal command to use sbatch with command-line Slurm parameters is:

sbatch -A [project_code] [script_filename]\n

where [project_code] is the project code, and [script_filename] the name of a bash script, for example:

sbatch -A uppmax2023-2-25 my_script.sh\n
Forgot your Rackham project?

One can go to the SUPR NAISS pages to see one's projects.

Example of the Rackham project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Rackham is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

What is in the script file?

The script file my_script.sh is a minimal example script. Such a script could be:

#!/bin/bash\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm here.

"},{"location":"cluster_guides/slurm_on_rackham/#sbatch-a-script-with-slurm-parameters-in-script","title":"sbatch a script with Slurm parameters in script","text":"

The minimal command to use sbatch with Slurm parameters in the script:

sbatch [script_filename]\n

where [script_filename] the name of a bash script, for example:

sbatch my_script.sh\n

The script must contain at least the following lines:

#SBATCH -A [project_code]\n

where [project_code] is the project code, for example:

#SBATCH -A uppmax2023-2-25\n
Forgot your Rackham project?

One can go to the SUPR NAISS pages to see one's projects.

Example of the Rackham project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Rackham is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

A full example script would be:

#!/bin/bash\n#SBATCH -A uppmax2023-2-25\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm here.

"},{"location":"cluster_guides/slurm_on_snowy/","title":"Using Slurm on Snowy","text":"

This page describes how to use Slurm on Snowy.

What is Slurm?

See the general page on Slurm here

What is Snowy?

See the general page on Snowy here

See Slurm troubleshooting how to fix Slurm errors.

"},{"location":"cluster_guides/slurm_on_snowy/#sbatch-and-interactive-on-snowy","title":"sbatch (and interactive) on Snowy","text":"

sbatch (and interactive) work the same as on other clusters; the only difference is that one needs to specify that one wants to use the Snowy compute nodes.

Want to start an interactive job?

See how to start an interactive job on Snowy here

Here it is shown how to submit a job with:

  • command-line Slurm parameters
  • Slurm parameters in the script
"},{"location":"cluster_guides/slurm_on_snowy/#sbatch-a-script-with-command-line-slurm-parameters","title":"sbatch a script with command-line Slurm parameters","text":"

The minimal command to use sbatch with command-line Slurm parameters is:

sbatch -A [project_code] -M snowy [script_filename]\n

where [project_code] is the project code, and [script_filename] the name of a bash script, for example:

sbatch -A uppmax2023-2-25 -M snowy my_script.sh\n
Forgot your Snowy project?

One can go to the SUPR NAISS pages to see one's projects.

An example of the Snowy project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Snowy is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

What is in the script file?

The script file my_script.sh is a minimal example script. Such a script could be:

#!/bin/bash\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm here.

"},{"location":"cluster_guides/slurm_on_snowy/#sbatch-a-script-with-slurm-parameters-in-script","title":"sbatch a script with Slurm parameters in script","text":"

The minimal command to use sbatch with Slurm parameters in the script:

sbatch [script_filename]\n

where [script_filename] the name of a bash script, for example:

sbatch my_script.sh\n

The script must contain at least the following lines:

#SBATCH -A [project_code]\n#SBATCH -M snowy\n

where [project_code] is the project code, for example:

#SBATCH -A uppmax2023-2-25\n#SBATCH -M snowy\n
Forgot your Snowy project?

One can go to the SUPR NAISS pages to see one's projects.

An example of the Snowy project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Snowy is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

A full example script would be:

#!/bin/bash\n#SBATCH -A uppmax2023-2-25\n#SBATCH -M snowy\necho \"Hello\"\n

Again, what is shown here is a minimal use of sbatch. See the general page on Slurm here.

"},{"location":"cluster_guides/slurm_scheduler/","title":"The job scheduler and the queue","text":"
  • How does the queue work?

  • Let's look graphically at jobs presently running.

  • x-axis: cores, one thread per core
  • y-axis: time

  • We see some holes where we may fit jobs already!

  • Let's see which types of jobs can fit!

  • 4 one-core jobs can run immediately (or a 4-core wide job).*

    • The jobs are too long to fit at core number 9-13.

  • A 5-core job has to wait.*

    • Too long to fit in cores 9-13 and too wide to fit in the last cores.
  • Easiest to schedule single-threaded, short jobs

Tip

  • You don't see the queue graphically, however.
  • But, overall:
    • short and narrow jobs will start fast
    • test and development jobs can make use of the dedicated development nodes if they are shorter than 1 hour and use at most two nodes.
    • booking a full node is a waste of resources unless you have a parallel program or need all the memory, e.g. 128 GB per node
"},{"location":"cluster_guides/slurm_troubleshooting/","title":"Slurm troubleshooting","text":"

When using Slurm, unexpected things may happen. This page describes Slurm errors.

"},{"location":"cluster_guides/slurm_troubleshooting/#1-invalid-account-or-accountpartition-combination-specified","title":"1. Invalid account or account/partition combination specified","text":""},{"location":"cluster_guides/slurm_troubleshooting/#11-full-error-message","title":"1.1. Full error message","text":"
sbatch: error: Batch job submission failed: Invalid account or account/partition combination specified\n
"},{"location":"cluster_guides/slurm_troubleshooting/#12-to-reproduce","title":"1.2. To reproduce","text":"
touch do_something.sh\necho '#!/bin/bash' >> do_something.sh \nsbatch -A some_invalid_account do_something.sh \n
"},{"location":"cluster_guides/slurm_troubleshooting/#13-problem","title":"1.3. Problem","text":"

As stated by the error message: you've used either:

  • an invalid account (for example, some_invalid_account in the example above)
  • an invalid combination of account and partition, for example using a Rackham account for a Snowy partition

Or, in less formal terms, you are using a NAISS project that is not an active UPPMAX project for that UPPMAX cluster.

"},{"location":"cluster_guides/slurm_troubleshooting/#14-solution","title":"1.4. Solution","text":"
  • View your NAISS projects and see if the project you used is indeed an active UPPMAX project that can be used on the cluster you expect
  • Use these in your scripts
"},{"location":"cluster_guides/slurm_troubleshooting/#2-invalid-project","title":"2. Invalid project","text":""},{"location":"cluster_guides/slurm_troubleshooting/#21-full-error-message","title":"2.1. Full error message","text":"
sbatch: error: Errors in job submission:\nsbatch: error: ERROR 1: Invalid project.\nsbatch: error: Use the flag -A to specify an active project with allocation on this cluster.\nsbatch: error: Batch job submission failed: Unspecified error\n

2.2. To reproduce

sbatch my_script.sh -A my_project\n
"},{"location":"cluster_guides/slurm_troubleshooting/#23-problem","title":"2.3. Problem","text":"

The order of the arguments is incorrect. The script to be submitted must be the last argument.

"},{"location":"cluster_guides/slurm_troubleshooting/#24-solution","title":"2.4. Solution","text":"
sbatch -A my_project my_script.sh\n
"},{"location":"cluster_guides/snowy/","title":"Snowy","text":"

Snowy is one of the UPPMAX clusters.

  • Snowy's name
  • Snowy's design
  • Snowy's hardware
  • Log in
  • Submitting jobs, using Slurm
  • Courses and workshops
"},{"location":"cluster_guides/snowy/#accounts-and-log-in","title":"Accounts and log in","text":"

Snowy is different from other clusters at UPPMAX in that there are no login nodes for Snowy. All access to this system is done via secure shell (SSH) interactive login to Rackham's login nodes, using the domain name rackham.uppmax.uu.se

ssh -X user@rackham.uppmax.uu.se\n

See the UPPMAX page on how to get a user account.

For questions concerning accounts and access to Rackham and Snowy, please contact UPPMAX support.

Note that the machine you arrive at when logged in is only a so-called login node, where you can do various smaller tasks. We have some limits in place that restrict your usage. For larger tasks you should use our batch system, which pushes your jobs onto other machines within the cluster.

All access to Snowy is done using the batch system Slurm, either as an interactive job or non-interactive batch jobs.

"},{"location":"cluster_guides/snowy/#using-the-batch-system","title":"Using the batch system","text":"

To allow a fair and efficient usage of the system we use a resource manager to coordinate user demands. On Snowy we use the Slurm resource manager, as is discussed in more detail there.

Note: When accessing Snowy from Rackham's login nodes you must always use the flag -M for all Slurm commands. Examples:

  • squeue -M snowy
  • jobinfo -M snowy
  • sbatch -M snowy slurm_script_file
  • scancel -u username -M snowy
  • interactive -A projectname -M snowy -p node -n 32 -t 01:00:00

Note: We always recommend loading all your modules in your job script file. This is even more important when running on Snowy since the module environment is not the same on the Rackham login nodes as on Snowy compute nodes.

"},{"location":"cluster_guides/snowy/#some-limits","title":"Some Limits","text":"
  • There is a job walltime limit of 30 days (720 hours).
  • We restrict each user to at most 5000 running and waiting jobs in total.
  • Each project has a 30-day rolling allocation of CPU hours. We do not forbid running jobs after the allocation is overdrawn, but instead allow you to submit jobs with a very low queue priority, so that you may be able to run your jobs anyway, if a sufficient number of nodes happens to be free on the system.
  • Very wide jobs will only be started around a maintenance window (just before it begins or right after it ends). Maintenance windows are planned for the first Wednesday of each month. On Snowy, a \"very wide\" job asks for 100 nodes or more.
"},{"location":"cluster_guides/snowy/#convenience-variables","title":"Convenience Variables","text":"
  • $SNIC_TMP - Path to node-local temporary disk space

The $SNIC_TMP variable contains the path to a node-local temporary file directory that you can use when running your jobs, in order to get maximum disk performance (since the disks are local to the current compute node). This directory will be automatically created on your (first) compute node before the job starts and automatically deleted when the job has finished.

The path specified in $SNIC_TMP is equal to the path: /scratch/$SLURM_JOB_ID, where the job variable $SLURM_JOB_ID contains the unique job identifier of your job.

WARNING: Please note, that in your \"core\" (see below) jobs, if you write data in the /scratch directory but outside of the /scratch/$SLURM_JOB_ID directory, your data may be automatically deleted during your job run.
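A minimal sketch of using $SNIC_TMP inside a job script (the paths and file names are placeholders):

export SRCDIR=$HOME/path-to-input\n\n# copy input to the node-local disk and run there, for maximum disk performance\ncp $SRCDIR/input.dat $SNIC_TMP/.\ncd $SNIC_TMP\n./my_program input.dat\n\n# copy results back before the job ends, as $SNIC_TMP is deleted afterwards\ncp *.out $SRCDIR/.\n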

"},{"location":"cluster_guides/snowy/#details-about-the-core-and-node-partitions","title":"Details about the \"core\" and \"node\" partitions","text":"

A normal Snowy node contains 128 GB of RAM and 16\u00a0compute cores. An equal share of RAM for each core would mean that each core gets at most 8 GB of RAM. This simple calculation gives one of the limits mentioned below for a \"core\" job.

You need to choose between running a \"core\" job or a \"node\" job. A \"core\" job must keep within certain limits, to be able to run together with up to 15 other \"core\" jobs on a shared node. A job that cannot keep within those limits must run as a \"node\" job.

Some serial jobs must run as \"node\" jobs. You tell Slurm that you need a \"node\" job with the flag \"-p node\". (If you forget to tell Slurm, you are by default choosing to run a \"core\" job.)

A \"core\" job:

  • Will use a part of the resources on a node, from a 1/16\u00a0share to a 15/16\u00a0share of a node.
  • Must specify fewer than 16 cores, i.e. between \"-n 1\" and \"-n 15\".
  • Must not demand \"-N\", \"--nodes\", or \"--exclusive\".
  • Is recommended not to demand \"--mem\"
  • Must not demand to run on a fat node (see below for an explanation of \"fat\") or a devel node.
  • Must not use more than 8 GB of RAM for each core it demands. If a job needs half of the RAM, i.e. 64 GB, you also need to reserve at least half of the cores on the node, i.e. 8 cores, with the \"-n 8\" flag.

A \"core\" job is accounted on your project as one \"core hour\" (sometimes also named as a \"CPU hour\") per core you have been allocated, for each wallclock hour that it runs. On the other hand, a \"node\" job is accounted on your project as sixteen core hours for each wallclock hour that it runs, multiplied with the number of nodes that you have asked for.

"},{"location":"cluster_guides/snowy/#node-types","title":"Node types","text":"

Snowy has two node types: thin being the typical cluster node, and fat nodes having double the normal amount of memory (256 GB). You may request a node with more RAM by adding the words \"-C mem256GB\" or \"-C fat\" to your job submission line, thus making sure that you will get 256 GB of RAM on each node in your job. If you need even more memory, you can request a 512 GB memory node by adding \"-C mem512GB\". Please note that there are only 13 nodes with 256 GB and 17 with 512 GB of RAM.

To request a fat node, use -C mem256GB or -C fat in your Slurm command.

To request the fattest nodes, use -C mem512GB in your Slurm command.
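A minimal sketch of booking a fat node on Snowy, submitted from a Rackham login node (the project and script names are placeholders):

sbatch -A uppmax2023-2-25 -M snowy -p node -C mem256GB -t 10:00:00 jobscript.sh\n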

"},{"location":"cluster_guides/snowy/#file-storage-and-disk-space","title":"File storage and disk space","text":"

At UPPMAX we have a few different kinds of storage areas for files, see Disk Storage User Guide for more information and recommended use.

"},{"location":"cluster_guides/snowy/#very-long-jobs","title":"Very Long jobs","text":"

If you have very long jobs that require more than 10 days of CPU time, we recommend using Snowy. In order for your job to successfully run for several weeks, you should implement the following:

  • Use only local disk for your job. Copy all input and data files needed to $SNIC_TMP at the start of your job and at the end, copy all output back to your project directory.
  • Book a full node with the Slurm flag -p node (you won't be able to submit these jobs in the core partition).
  • If\u00a0possible make sure you do not rely on files stored outside the node. One way of achieving this may be to copy program files to\u00a0$SNIC_TMP.

Even if you do this, we can't promise that a 20 or 30 day long job will finish without being interrupted by problems with the global file systems, the network, or the node itself.

"},{"location":"cluster_guides/snowy/#snowys-design","title":"Snowy's design","text":"

Snowy is a (general-purpose) high-performance computing (HPC) cluster with GPUs, suitable for longer jobs.

What is an HPC cluster?

What an HPC cluster is, is described here.

Or: Snowy is a group of computers that can effectively run many calculations, as requested by multiple people, at the same time. Snowy runs the Linux operating system and all users need some basic Linux knowledge to use Snowy.

Additionally, Snowy has GPUs and allows for jobs running for maximally 30 days.

Snowy does not have a login node. Instead, it uses a login node on Rackham.

Using Linux

Using Linux (and especially the so-called command-line/terminal) is essential to use Snowy. Learning the essential Linux commands is described here.

"},{"location":"cluster_guides/snowy/#snowys-system-configuration","title":"Snowy's system configuration","text":"

Snowy consists of 228 compute servers (nodes) where each compute server consists of two 8-core Xeon E5-2660 processors running at 2.2 GHz. We provide 198\u00a0nodes with 128 GB memory\u00a0(s1-s120, s151-s228), 13 nodes with 256 GB (s138-s150) and\u00a017\u00a0nodes with 512\u00a0GB (s121-s137). All nodes are interconnected with a 2:1 oversubscribed FDR (40\u00a0GB/s) InfiniBand fabric. In total Snowy provides 3548\u00a0CPU cores in compute nodes.

"},{"location":"cluster_guides/snowy/#compiling-on-snowy","title":"Compiling on Snowy","text":"

There are several compilers available through the module system on Snowy. This gives you flexibility to obtain programs that run optimally on Snowy.

  • gcc - the newest version usually generates the best code, if you tell it to use the new instructions. Check which version is the newest by doing module avail. The compiler executable is named gcc for C, g++ for C++, and gfortran for Fortran. To use the new instructions available on Snowy (AVX2 and FMA3), give the additional options \"-mavx2 -mfma\" to gcc. For good performance with this compiler you should also specify optimization at least at level -O2 or -O3. Also try using -march=broadwell for GCC >= 4.9.0 or -march=core-avx2 for GCC 4.8.x, which will enable all the instructions on the CPU.
  • Intel+MKL - usually generates the fastest code. As with gcc, it is good to use the latest version. The compiler executable is named icc for C, icpc for C++, and ifort for Fortran. You should give optimization options at least -O2, preferably -O3 or -fast. You can also try to use the -xCORE-AVX2 option to the compiler to output AVX2 instructions.
  • pgi - often generates somewhat slower code, but it is stable so often it is easier to obtain working code, even with quite advanced optimizations. The compiler executable is named pgcc for C, pgCC for C++, and pgfortran, pgf77, pgf90, or pgf95 for Fortran. For this compiler, you can generate code for Snowy using the following options \"UPDATES NEEDED\". Also give optimization options at least -O2, preferably -Ofast, even though the compile times are much longer, the result is often worth the wait.
"},{"location":"cluster_guides/snowys_design/","title":"Snowy's design","text":"

Snowy is a (general-purpose) high-performance computing (HPC) cluster with GPUs, suitable for longer jobs.

What is an HPC cluster?

What an HPC cluster is, is described here.

Or: Snowy is a group of computers that can effectively run many calculations, as requested by multiple people, at the same time. Snowy runs the Linux operating system and all users need some basic Linux knowledge to use Snowy.

Additionally, Snowy has GPUs and allows for jobs running for maximally 30 days.

Snowy does not have a login node. Instead, it uses a login node on Rackham.

Using Linux

Using Linux (and especially the so-called command-line/terminal) is essential to use Snowy. Learning the essential Linux commands is described here.

"},{"location":"cluster_guides/snowys_design/#snowys-system-configuration","title":"Snowy's system configuration","text":"

Snowy consists of 228 compute servers (nodes) where each compute server consists of two 8-core Xeon E5-2660 processors running at 2.2 GHz. We provide 198\u00a0nodes with 128 GB memory\u00a0(s1-s120, s151-s228), 13 nodes with 256 GB (s138-s150) and\u00a017\u00a0nodes with 512\u00a0GB (s121-s137). All nodes are interconnected with a 2:1 oversubscribed FDR (40\u00a0GB/s) InfiniBand fabric. In total Snowy provides 3548\u00a0CPU cores in compute nodes.

"},{"location":"cluster_guides/snowys_name/","title":"Snowy's name","text":"

Snowy, like all UPPMAX clusters, is named after a Tintin character, in this case after Snowy, Tintin's dog:

What are the UPPMAX clusters?

All UPPMAX clusters can be found here.

"},{"location":"cluster_guides/software_on_transit/","title":"Software on Transit","text":"

Transit is an UPPMAX service that can be used to securely transfer files.

This page describes the software on Transit.

After logging in to Transit, you cannot make lasting changes to anything, except for mounted wharf directories. However, anything you have added to your Rackham home directory is available on Transit.

In addition, some modules are available.

  • SciLifeLab Data Delivery System - https://delivery.scilifelab.se/
# Load the tool from the software module tree\nmodule load bioinfo-tools dds-cli\n\n# Run the tool\ndds\n

To download data from TCGA, log in to Rackham and install the GDC client to your home directory. Then log in to Transit, mount the wharf, and run ./gdc-client.

2FA on transit

If you connect from abroad and you are asked for the 2FA (two factor authentication), there is a grace period (about 5 minutes) in which you can ssh/scp/rsync/sftp to transit without the need for 2FA. This allows you to use these and other tools that might experience problems with the 2FA.

"},{"location":"cluster_guides/start_interactive_node/","title":"Starting an interactive node","text":"

Below we describe the general ideas of using an interactive node:

  • the types of nodes
  • When to use an interactive node

To start an interactive node on specific cluster:

  • Start an interactive node on Bianca
  • Start an interactive node on Rackham
  • Start an interactive node on Snowy
","tags":["start","interactive","node"]},{"location":"cluster_guides/start_interactive_node/#types-of-nodes","title":"Types of nodes","text":"

The UPPMAX HPC clusters have three types of nodes:

What are nodes?

What nodes are, is described in general terms here.

  • login nodes: nodes where a user enters and interacts with the system
  • calculation nodes: nodes that do the calculations
Requesting a calculation to run

Requesting a calculation is described here. This is done by using the Slurm scheduler.

  • interactive nodes: a type of calculation node, where a user can do calculations directly
How can I find out on which node I am?

In a terminal, type hostname:

  • the login node has name rackham[number], where [number] is the number of the login node
  • an interactive node has name r[number], where [number] is the compute node number

As a login node is shared with all users, there is a simple rule to use it fairly:

Only do short and light things on the login node

Examples of short and light things are:

  • Editing files
  • Copying, deleting, moving files
  • Scheduling jobs
  • Starting an interactive session

Examples of heavy things are:

  • Running code with big calculations
  • Develop code with big calculations line-by-line
Develop code with big calculations line-by-line

This usage is typically done on an interactive node

","tags":["start","interactive","node"]},{"location":"cluster_guides/start_interactive_node/#when-to-use-an-interactive-node","title":"When to use an interactive node","text":"

Some users develop computer code on an HPC cluster in a line-by-line fashion. These users typically want to run a (calculation-heavy) script frequently, to test if the code works.

However, scheduling each new line is too slow, as it can take minutes before the new code is run. Instead, there is a way to directly work with such code: use an interactive node.

An interactive node is a type of calculation node, where one can run heavy calculations directly.

flowchart TD\n    UPPMAX(What to run on which node?)\n    operation_type[What type of operation/calculation?]\n    interaction_type[What type of interaction?]\n    login_node(Work on login node)\n    interactive_node(Work on interactive node)\n    calculation_node(Schedule for calculation node)\n\n    UPPMAX-->operation_type\n    operation_type-->|light,short|login_node\n    operation_type-->|heavy,long|interaction_type\n    interaction_type-->|Direct|interactive_node\n    interaction_type-->|Indirect|calculation_node
","tags":["start","interactive","node"]},{"location":"cluster_guides/start_interactive_node_on_bianca/","title":"Starting an interactive node on Bianca","text":"

This page describes how to start an interactive node on Bianca. See the general information on starting an interactive node on how to do so in general.

Prefer a video?

See the video Starting an interactive node on Bianca

To use an interactive node, in a terminal, type:

interactive -A [project name] -n [number_of_cores] -t [session_duration]\n

Where

  • [project name] is your project name, for example sens2023598
  • [number_of_cores] is the number of cores, for example 2
  • [session_duration] is the duration of the session, for example 8:00:00 for eight hours

For example:

interactive -A sens2023598 -n 2 -t 8:00:00\n

This starts an interactive session using project sens2023598 that uses 2 cores and has a maximum duration of 8 hours.

The script interactive accepts many of the same arguments as sbatch.

Forgot your Bianca projects?

One easy way to see your Bianca projects is to use the Bianca remote desktop login screen at https://bianca.uppmax.uu.se/.

Has Bianca frozen?

It can take tens of minutes before an interactive node is allocated.

Bianca has not frozen, go ahead and have a coffee break :-)

"},{"location":"cluster_guides/start_interactive_node_on_rackham/","title":"Starting an interactive node on Rackham","text":"

This page describes how to start an interactive node on Rackham; see the general information on starting an interactive node for the general ideas.

To use an interactive node, in a terminal, type:

interactive -A [project name]\n

For example:

interactive -A uppmax2023-2-25\n

This starts an interactive session using project uppmax2023-2-25 that has a default duration of 1 hour.

Forgot your Rackham project?

One can go to the SUPR NAISS pages to see one's projects.

An example of the Rackham project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Rackham is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

To increase the duration of the interactive session, add the use of -t:

interactive -A [project name] -t [session_duration]\n

For example:

interactive -A uppmax2023-2-25 -t 8:00:00\n

This starts an interactive session using project uppmax2023-2-25 that has a maximum duration of 8 hours.

Has Rackham frozen?

It can take tens of seconds before an interactive node is allocated.

Rackham has not frozen, just be a bit more patient.

To stop the session, do:

exit\n

This will take you back to the login node.

"},{"location":"cluster_guides/start_interactive_node_on_snowy/","title":"Starting an interactive node on Snowy","text":"

This page describes how to start an interactive node on Snowy; see the general information on starting an interactive node for the general ideas.

To use an interactive node, in a terminal, type:

interactive -A [project name] -M snowy\n

For example:

interactive -A uppmax2023-2-25 -M snowy\n

This starts an interactive session using project uppmax2023-2-25 that has a default duration of 1 hour.

Forgot your Snowy project?

One can go to the SUPR NAISS pages to see one's projects.

Example of the Snowy project called 'UPPMAX 2023/2-25'

On the SUPR NAISS pages, projects are called 'UPPMAX [year]/[month]-[day]', for example, 'UPPMAX 2023/2-25'. The corresponding account name to use on Snowy is slightly different: uppmax[year]-[month]-[day], for example, uppmax2023-2-25.

To increase the duration of the interactive session, add the use of -t:

interactive -A [project name] -M snowy -t [session_duration]\n

For example:

interactive -A uppmax2023-2-25 -M snowy -t 8:00:00\n

This starts an interactive session using project uppmax2023-2-25 that has a maximum duration of 8 hours.

Has Snowy frozen?

It can take tens of seconds before an interactive node is allocated.

Snowy has not frozen, just be a bit more patient.

"},{"location":"cluster_guides/storage_compression/","title":"Storage and compression","text":""},{"location":"cluster_guides/storage_compression/#storage","title":"Storage","text":"

Disk storage guide

How does automatic backup of project areas work at UPPMAX?

Backup

What is this 'glob' folder in my home folder?
  • The glob directory found in your home has been deprecated since early 2017.
  • It is now a normal directory and shares your default 32 GB home quota.
  • The glob directory remains so as not to interfere with scripts that might reference ~/glob in the source code.

  • Historically, the glob directory was the main storage area for storage of user data.

    • It was shared by all nodes.
    • The directory was used for files needed by all job instances and could house files exceeding the quota of the home directory.
    • Job input and output files were (and can still be) stored here.
  • You might also be interested in our disk storage guide.

Disk quota exceeded when copying data

The problem is that if you have data in a project directory, e.g. /proj/snic2017-1-999, and are copying the data to another project directory, e.g. /proj/uppstore2017-999, then you may get a \"disk quota exceeded\" error.

This happens when your (snic2017-1-999) project quota is almost full and you're copying the data without changing the group ownership of the files. Even though the destination folder is owned by a project with sufficient quota, the files will for a short time be owned by the original project. By copying the files, the earlier project's disk usage is increased and the quota is exceeded.

The solution is one of these options:

  1. Use mv instead of cp
  2. Give the flag --no-g to rsync to set the group ownership of the destination files to that of the destination directory
  3. Use newgroup [the-group-i-want] to switch your active group first, then rsync -rlpt /old-location /new-location

Explanation:

`-r` is for recursive\n`-l` is to preserve links\n`-p` is to preserve permissions\n`-t` is to preserve times\n
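For example, using option 2 above (the project paths are placeholders):

rsync -rlpt --no-g /proj/snic2017-1-999/mydata /proj/uppstore2017-999/\n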
"},{"location":"cluster_guides/storage_compression/#compression","title":"Compression","text":"File compression guide

Compression guide

How can I compress my files as quickly and efficiently as possible?
  • You can use this SBATCH script [1] to run the compression in parallel as a node job, with a parallel version of the highly efficient bzip2 compression software.

  • Remember to modify the appropriate #SBATCH parameters at the top of the file, according to your project, and the estimated time to compress your files.

  • [1] Thanks Roman Valls Guimera, for contributing this script.

How should I compress FastQ-format files?

Compress FastQ

Which compression format should I use for NGS-related files (FastQ, Fasta, VCF, GFF, etc.)?

Compression format

"},{"location":"cluster_guides/transfer_bianca/","title":"File transfer to/from Bianca","text":"
flowchart LR\n  subgraph sunet[SUNET]\n    subgraph bianca[Bianca]\n      wharf\n    end\n    transit[transit server]\n    sftp_server[SFTP server]\n    user[User in SUNET or user on Rackham or user on other NAISS clusters]\n    wharf <--> transit\n    wharf <--> sftp_server\n    transit <--> user\n    sftp_server <--> user\n  end

File transfer is the process of getting files from one place to the other. This page shows how to do file transfer to/from the Bianca UPPMAX cluster.

For all file transfer on Bianca:

  • The user needs to be inside of SUNET
  • The files are moved from/to the wharf folder
"},{"location":"cluster_guides/transfer_bianca/#file-transfer-methods","title":"File transfer methods","text":"

There are multiple ways to transfer files to/from Bianca:

  • Using a graphical program - graphical interface, intuitive, for small amounts of data only
  • Using rsync - terminal, recommended
  • Using sftp - terminal, easy to learn, can use terminal commands to select files
  • Using lftp - terminal
  • Transit server from/to Rackham (see below) - terminal, can be used to transfer data between clusters in general
  • Mounting wharf on your local computer - both graphical and terminal, needs a computer with sshfs installed
"},{"location":"cluster_guides/transfer_bianca/#using-a-graphical-program","title":"Using a graphical program","text":"

FileZilla connected to Bianca

To transfer files to/from Bianca one can use a graphical tool, such as FileZilla and WinSCP. See Bianca file transfer using a graphical program for details.

"},{"location":"cluster_guides/transfer_bianca/#using-sftp","title":"Using sftp","text":"

sftp is a terminal SFTP client to transfer files to/from Bianca. See Bianca file transfer using sftp.

"},{"location":"cluster_guides/transfer_bianca/#using-lftp","title":"Using lftp","text":"

lftp is a terminal file transfer client to transfer files to/from Bianca. See Bianca file transfer using lftp.

"},{"location":"cluster_guides/transfer_bianca/#using-rsync","title":"Using rsync","text":"

rsync is a terminal program to transfer files to/from Bianca. See Bianca file transfer using rsync.

"},{"location":"cluster_guides/transfer_bianca/#transit-server","title":"Transit server","text":"

To facilitate secure data transfers to, from, and within the system for computing on sensitive data, a special service is available via SSH at transit.uppmax.uu.se.

See the UPPMAX documentation on the Transit server.

  • Note that your home directory is mounted read-only; any changes you make to your \"local\" home directory (on transit) will be lost upon logging out.

  • You can use commands like rsync, scp to fetch data and transfer it to your bianca wharf.

    • You can use cp to copy from Rackham to the wharf
  • Remember that you cannot make lasting changes to anything except for mounted wharf directories. Therefore you have to use rsync and scp to transfer from the wharf to Rackham.
  • The mounted directory will be kept for later sessions.
"},{"location":"cluster_guides/transfer_bianca/#moving-data-from-transit-to-rackham","title":"Moving data from transit to Rackham","text":"
  • On Rackham (or other computer): copy files to Bianca via transit:
# scp\nscp path/my_files my_user@transit.uppmax.uu.se:sens2023531/\n\n# rsync\nrsync -avh path/my_files my_user@transit.uppmax.uu.se:sens2023531/\n
  • On transit: copy files to Bianca from Rackham (or other computer)
# scp\nscp my_user@rackham.uppmax.uu.se:path/my_files ~/sens2023531/\n\n# rsync\nrsync -avh my_user@rackham.uppmax.uu.se:path/my_files ~/sens2023531/\n
See this rsync tutorial for beginners: https://www.digitalocean.com/community/tutorials/how-to-use-rsync-to-sync-local-and-remote-directories

Keep in mind that project folders on Rackham are not available on transit.

"},{"location":"cluster_guides/transfer_bianca/#moving-data-between-projects","title":"Moving data between projects","text":"
  • You can use transit to transfer data between projects by mounting the wharfs for the different projects and transferring data with rsync.
  • Note that you may only do this if this is allowed (agreements, permissions, etc.)
"},{"location":"cluster_guides/transfer_bianca/#mounting-wharf-on-your-local-computer","title":"Mounting wharf on your local computer","text":"

Mounting wharf means that a wharf folder is added to the filesystem of your local computer, after which you can use it like any other folder.

See the UPPMAX documentation of wharf on how to do so.

Summary

  • For simple transfers use SFTP to connect to bianca-sftp.uppmax.uu.se - use command line sftp or tools that support SFTP protocol.
  • For rsync - sync files to pre-mounted wharf folder from Rackham or secure local computer.
  • Keep in mind that project folders on Rackham are not available on transit.
"},{"location":"cluster_guides/transfer_bianca/#bianca-file-transfer-as-image","title":"Bianca file transfer as image","text":""},{"location":"cluster_guides/transfer_dardel/","title":"File transfer to/from Dardel","text":"

This page describes how to transfer files to Dardel, the HPC cluster at PDC in Stockholm.

"},{"location":"cluster_guides/transfer_dardel/#why-do-i-need-this","title":"Why do I need this?","text":"

The Rackham cluster will be decommissioned at the end of 2024 so all projects have to migrate their data and calculations to other resources. The plan from NAISS is that all Rackham users will move to the Dardel cluster at PDC.

"},{"location":"cluster_guides/transfer_dardel/#how-do-i-do-this","title":"How do I do this?","text":"

First, we are here to help. Please contact support if you run into problems when trying the guide below.

To transfer your files to Dardel, follow the steps below.

flowchart TD\n  get_supr_project[1 Access to a SUPR project with Dardel]\n  get_pdc_account[2 Access to a PDC account]\n  create_ssh_key[3 Create SSH key pair on Rackham]\n  add_ssh_key[4 Add public SSH key to PDC Login Portal]\n  transfer_files[5 Transfer files to Dardel]\n\n  get_supr_project --> |requires| get_pdc_account\n  create_ssh_key --> |requires| add_ssh_key\n  get_pdc_account --> |requires| add_ssh_key\n  add_ssh_key --> |requires| transfer_files
"},{"location":"cluster_guides/transfer_dardel/#3-create-an-ssh-key-pair","title":"3. Create an SSH key pair","text":"

How to create an SSH key pair is described in detail at the PDC page on how to create an SSH key pair.

On Rackham, do:

# generate the key\nssh-keygen -t ed25519 -N \"\" -f ~/.ssh/id_ed25519_pdc\n

and you have created an SSH key pair.

How do I know this worked?

On Rackham, in a terminal, type:

$ cat ~/.ssh/id_ed25519_pdc.pub\n

This will show a text similar to:

ssh-ed25519 AAAA63Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se\n
"},{"location":"cluster_guides/transfer_dardel/#5-add-the-public-ssh-key-to-pdcs-login-portal","title":"5. Add the public SSH key to PDC:s Login Portal","text":"

How to add the SSH public key is described in detail in the PDC documentation on how to add SSH keys.

You will need to get the public part of the key in order to complete this step. On Rackham, in a terminal, type:

cat ~/.ssh/id_ed25519_pdc.pub\n

This will show a text similar to:

ssh-ed25519 AAAA63Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se\n

Select and copy that text, it is the public key you will add.

In short,

  1. Open the PDC Login Portal
  2. Follow the instructions there to login.
  3. Click on the Add new key link.
  4. Paste the public key you copied after running the cat command above.
  5. Make up a name for the key so you know which computer it resides on. E.g. rackham-darsync
  6. Press the Save button.
What does adding the key look like?

Click on 'Prove Identity'

PDC key management before any keys are added.

How it looks when adding a new key.

After having added your public SSH key, you will be able to see your registered keys.

What does that look like?

Here we see that there is an SSH key added.

The next thing you have to do is to add UPPMAX as a place permitted to use your newly added key. Do that by pressing the Add address link for the key you just added. At the bottom of the form there is a section called Custom domain. Add *.uppmax.uu.se in that field and press Save.

What does that look like?

This is where you enter that UPPMAX is allowed to use this key.

For staff only

@Richel, need a screenshot of adding custom domain.

To validate that it works, you can connect to Dardel via SSH:

# replace your_dardel_username with your actual Dardel username\nssh -i ~/.ssh/id_ed25519_pdc your_dardel_username@dardel.pdc.kth.se\n
For staff only

@Richel, need a screenshot of ssh working

"},{"location":"cluster_guides/transfer_dardel/#6-transfer-files","title":"6. Transfer files","text":"

To facilitate this move we have created Darsync, a tool that can inspect your files and make suggestions to make the transfer easier, as well as generating a script file you can submit to Slurm to perform the actual file transfer. Read more about how to use Darsync here.

Here is a summary of how to run it, using /path/to/dir as a placeholder for the actual path to the directory you want to copy to Dardel:

module load darsync\n\ndarsync check --local-dir /path/to/dir\n# fix any errors the check step found\ndarsync gen --local-dir /path/to/dir --outfile ~/dardel_transfer_script.sh\n
"},{"location":"cluster_guides/transfer_dardel/#6-submit-the-script-created-by-darsync","title":"6. Submit the script created by Darsync","text":"

Submit the transfer script created by Darsync to Slurm:

sbatch --output=~/dardel_transfer.out --error=~/dardel_transfer.err ~/dardel_transfer_script.sh\n
"},{"location":"cluster_guides/transfer_dardel/#7-check-logs","title":"7. Check logs","text":"

Once the submitted job has finished, have a look at the log file produced by the job and make sure it did not end in a error message.

tail ~/dardel_transfer.out\ntail ~/dardel_transfer.err\n
For staff only

@Richel, need a screenshot of successful rsync command, as well as a failed one?

If there are any errors, run darsync gen again, correct any mistakes, and submit the new script file.

If you have updated your files at UPPMAX and want to sync over the changes, just submit the same script file again and it will only transfer over the modified files.

If your data transfer took too long and got killed by Slurm, or if it crashed for some other reason, just submit the same script again and it will pick up from where it left off.

"},{"location":"cluster_guides/transfer_dardel/#8-delete-the-ssh-key-pair","title":"8. Delete the SSH key pair","text":"

When you are done transferring files, you should delete the SSH keys you created in the previous steps of this guide. The keys were created without a password protecting them (required to run darsync as an unattended job), so it is best to delete them.

rm ~/.ssh/id_ed25519_pdc*\n

Create new ones if you still need to connect to Dardel from UPPMAX. To create new keys with a password on them, simply run:

ssh-keygen -t ed25519\n

and add the new public key (~/.ssh/id_ed25519.pub) to the PDC Login Portal following the same steps as above.

Once you are sure your data has been transferred, we recommend that you switch over to only work on Dardel. If you keep working on both clusters you will easily forget which cluster has the most up-to-date version of the files.

"},{"location":"cluster_guides/transfer_dardel/#link","title":"Link","text":"
  • PDC's page on getting access to Dardel
"},{"location":"cluster_guides/transfer_rackham/","title":"File transfer to/from Rackham","text":"

There are multiple ways to transfer files to/from Rackham:

  • Using a graphical program - graphical interface, intuitive, for small amounts of data only
  • Using SCP - terminal, easy to learn, can be used in scripts
  • Using SFTP - terminal, easy to learn, secure
  • Using transit - terminal, easy to learn, secure, can transfer between HPC clusters

Each of these methods is discussed below.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_rackham/#using-a-graphical-program","title":"Using a graphical program","text":"

One can transfer files to/from Rackham using a graphical program. A graphical interface is intuitive to most users. However, it can be used for small amounts of data only and whatever you do cannot be automated.

See Rackham file transfer using a graphical program for a step-by-step guide how to transfer files using a graphical tool.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_rackham/#using-scp","title":"Using SCP","text":"

One can transfer files to/from Rackham using SCP in a terminal. This works similar to a regular copy of files, except that a remote address needs to be specified. The advantage of SCP is that it can be used in scripts.

See Rackham file transfer using SCP for a step-by-step guide how to transfer files using SCP.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_rackham/#using-sftp","title":"Using SFTP","text":"

One can transfer files to/from Rackham using SFTP in a terminal. One connects a local and a remote folder, after which one can upload and download files. SFTP is considered a secure file transfer protocol.

See Rackham file transfer using SFTP for a step-by-step guide how to transfer files using SFTP.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_rackham/#using-transit","title":"Using transit","text":"

One can transfer files to/from Rackham using the UPPMAX transit server. One connects a local folder and the transit server, after which one can upload and download files.

See Rackham file transfer using transit for a step-by-step guide how to transfer files using the transit UPPMAX server.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_rackham/#overview","title":"Overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n    classDef transit_node fill:#fff,color:#000,stroke:#fff\n\n    subgraph sub_inside[SUNET]\n      direction LR\n      user(User)\n      user_local_files(Local user files):::file_node\n\n      subgraph sub_transit_env[Transit]\n        transit_login(Transit login):::calculation_node\n        files_on_transit(Files posted to Transit):::transit_node\n      end\n      subgraph sub_rackham_shared_env[Rackham]\n          rackham_login(Rackham login node):::calculation_node\n          files_in_rackham_home(Files in Rackham home folder):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#ccc,color:#000,stroke:#000\n    style sub_transit_env fill:#cfc,color:#000,stroke:#000\n    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000\n\n    user --> |has|user_local_files\n    user --> |logs in |transit_login\n    user --> |logs in |rackham_login\n\n    user_local_files <--> |graphical tool|files_in_rackham_home\n    user_local_files <--> |SCP|files_in_rackham_home\n    user_local_files <--> |SFTP|files_in_rackham_home\n    user_local_files <--> |graphical tool|files_on_transit\n    user_local_files <--> |SFTP|files_on_transit\n\n    rackham_login --> |can use|files_in_rackham_home\n\n    transit_login --> |can use|files_on_transit\n    files_on_transit <--> |transfer|files_in_rackham_home\n\n    files_in_rackham_home ~~~ transit_login

Overview of file transfer on Rackham. The purple nodes are about file transfer; the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

","tags":["Rackham","file","transfer","data"]},{"location":"cluster_guides/transfer_transit/","title":"File transfer to/from Transit","text":"

There are multiple ways to transfer files to/from Transit:

What is Transit?

Transit is an UPPMAX service to send files around. It is not a file server.

See the page about Transit for more detailed information.

| Method | Features |
| --- | --- |
| Using a graphical program | Graphical interface, intuitive, for small amounts of data only |
| Using rsync | Terminal, easy to learn, secure |
| Using SFTP | Terminal, easy to learn, secure |
| Using SCP | Only download, terminal, easy to learn, can be used in scripts |

Each of these methods is discussed below.

"},{"location":"cluster_guides/transfer_transit/#using-a-graphical-program","title":"Using a graphical program","text":"

One can transfer files to/from Transit using a graphical program. A graphical interface is intuitive to most users. However, it is suitable only for small amounts of data, and the transfers cannot be automated.

See Transit file transfer using a graphical program for a step-by-step guide on how to transfer files using a graphical tool.

"},{"location":"cluster_guides/transfer_transit/#using-rsync","title":"Using rsync","text":"

Transit is used as a stepping-stone to transfer files to Bianca using rsync.
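A hedged sketch of that stepping-stone flow, reusing the hypothetical user sven and project sens2023598 from elsewhere on these pages; the mount_wharf helper is an assumption here, so follow the linked rsync guide for the exact steps:

ssh sven@transit.uppmax.uu.se
mount_wharf sens2023598            # assumption: attaches the Bianca wharf as a folder on Transit
rsync -av my_folder/ sens2023598/  # copy a folder into the mounted wharf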

"},{"location":"cluster_guides/transfer_transit/#using-scp","title":"Using SCP","text":"

One cannot upload files to Transit using SCP in a terminal: Transit only allows for sending files from A to B, not for storing them.

One can download the files on Transit. However, Transit is not a file server. Instead, the files that appear to be on Transit are the files in your Rackham home folder. Due to this, it makes more sense to use SCP to transfer files to/from Rackham.

For completeness' sake, see Transit file transfer using SCP for a step-by-step guide on how to transfer files using SCP. It shows that one cannot upload files to Transit.
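A minimal sketch of this asymmetry, assuming a hypothetical user sven and a hypothetical file name:

scp sven@transit.uppmax.uu.se:my_file.txt .   # works: downloads from your Rackham home folder
scp my_file.txt sven@transit.uppmax.uu.se:    # does not work: Transit does not store uploaded files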

"},{"location":"cluster_guides/transfer_transit/#using-sftp","title":"Using SFTP","text":"

One can transfer files to/from Transit using SFTP in a terminal. One connects a local and a remote folder, after which one can upload and download files. SFTP is considered a secure file transfer protocol.

See Transit file transfer using SFTP for a step-by-step guide on how to transfer files using SFTP.

"},{"location":"cluster_guides/transfer_transit/#overview","title":"Overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fff,color:#000,stroke:#000\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n    classDef transit_node fill:#fff,color:#000,stroke:#fff\n\n    subgraph sub_inside[SUNET]\n      user_local_files(Local user files):::file_node\n\n      subgraph sub_transit_env[Transit]\n        files_on_transit(Files posted to Transit):::transit_node\n      end\n      subgraph sub_rackham_shared_env[Rackham]\n        files_in_rackham_home(Files in Rackham home folder):::file_node\n      end\n      subgraph sub_bianca_private_env[Bianca]\n        files_in_bianca_project(Files in Bianca project folder):::file_node\n      end\n      subgraph sub_other_clusters[Other clusters]\n        files_on_other_clusters(Files on other clusters):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#ccc,color:#000,stroke:#000\n    style sub_transit_env fill:#cfc,color:#000,stroke:#000\n    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000\n    style sub_bianca_private_env fill:#ccf,color:#000,stroke:#000\n    style sub_other_clusters fill:#ffc,color:#000,stroke:#000\n\n    user_local_files <--> |graphical tool|files_on_transit\n    user_local_files <--> |SFTP|files_on_transit\n\n    files_on_transit <--> |SCP|files_in_rackham_home\n    files_on_transit <--> |SFTP|files_in_rackham_home\n\n    files_on_transit <--> |SCP|files_in_bianca_project\n    files_on_transit <--> |SFTP|files_in_bianca_project\n\n    files_on_transit <--> |transfer|files_on_other_clusters\n

Overview of file transfer on Transit

"},{"location":"cluster_guides/transit/","title":"Transit","text":"

Transit is an UPPMAX service that can be used to securely transfer files between online locations, such as your local computer, Bianca, Rackham and other sensitive data clusters.

Is Transit a file server?

Transit is a service, not a file server: it does not store files.

This can be observed by uploading files to Transit and then closing the connection before sending the files to a permanent location: the files that existed only on Transit will disappear.

What is Transit?

A Swedish post box. The yellow post box is for non-regional mail, the blue for regional mail.

Transit can be viewed as a post box, where the file you upload is a letter.

If you put a letter without an address in a post box, it will be thrown away.

If you put an address on the letter, the letter will be delivered. Here, 'putting an address on the letter' is to copy the file to the desired location.

  • how to log in to Transit
  • file transfer using Transit.
    • Bianca file transfer using rsync
  • software on Transit
"},{"location":"cluster_guides/transit_file_transfer_using_gui/","title":"File transfer to/from Transit using a graphical tool","text":"

There are multiple ways to transfer files to/from Transit. Here we describe how to do so using a graphical tool.

There are multiple graphical tools to do so:

| Link to procedure | Tool | Description |
| --- | --- | --- |
| here | FileZilla | Free, open source, works on all platforms (recommended) |
| here | WinSCP | Only works under Windows |
"},{"location":"cluster_guides/uppmax/","title":"UPPMAX","text":"

UPPMAX is an organization that provides HPC infrastructure that is physically located in Uppsala. To do so, it provides the UPPMAX systems below.

"},{"location":"cluster_guides/uppmax/#uppmax-systems","title":"UPPMAX systems","text":"

Here we place Bianca among the other UPPMAX systems.

There are three types of UPPMAX systems:

  • Computing systems
  • Storage systems
  • Cloud services

One can apply for these resources, as is described here.

"},{"location":"cluster_guides/uppmax/#uppmax-computing-systems","title":"UPPMAX computing systems","text":"

Computing systems allow a user to do heavier computational calculations. At UPPMAX, we use multiple HPC clusters, which are discussed here.

"},{"location":"cluster_guides/uppmax/#uppmax-storage-systems","title":"UPPMAX storage systems","text":"

See UPPMAX systems.

"},{"location":"cluster_guides/uppmax/#uppmax-cloud-services","title":"UPPMAX Cloud services","text":"

See UPPMAX systems.

Cloud services allow a user to have something active (typically a website) that can be accessed by the internet.

"},{"location":"cluster_guides/uppmax_as_an_organization/","title":"UPPMAX as an organization","text":"

UPPMAX is a provider of HPC infrastructure that is physically located in Uppsala.

Where can I find an overview of UPPMAX?

One can find an overview of UPPMAX here.

Here we place UPPMAX within the bigger, national picture, starting from the biggest source of money for research in Sweden.

Vetenskapsrådet ('Science Council', VR) is the biggest funder of research in Sweden and funds the national HPC infrastructure.

The National Academic Infrastructure for Supercomputing in Sweden (NAISS) provides such HPC infrastructure: computing power, storage and data services. Applications for these resources start at this NAISS page. These resources are physically located in multiple places in Sweden, among others Uppsala.

Uppsala Multidisciplinary Center for Advanced Computational Science (UPPMAX = UppMACS) provides the HPC infrastructure that is physically located in Uppsala. Part of this is to provide training and support.

flowchart TD\n    HPC_Sweden(HPC in Sweden)\n    HPC_others(HPC in other cities)\n    HPC_Uppsala(HPC in Uppsala)\n    NAISS(NAISS)\n    UPPMAX(UPPMAX)\n    UU(Uppsala University)\n    Users(Users)\n    VR(Vetenskapsr\u00e5det)\n\n    VR --> |money| HPC_Sweden\n    HPC_Sweden -->|done by| NAISS\n    NAISS --> |money| HPC_others\n    NAISS --> |money| HPC_Uppsala\n    HPC_Uppsala -->|done by| UPPMAX\n    UU -->|money| HPC_Uppsala\n    Users -->|apply for HPC|NAISS
"},{"location":"cluster_guides/uppmax_cloud/","title":"UPPMAX cloud","text":"

Cloud services allow a user to have something active (typically a website) that can be accessed by the internet.

The NAISS 'Swedish Science Cloud' (SSC) consists of multiple regions. The eastern region (called EAST-1) of SSC is named 'Dis' (the Swedish word for 'haze') and is hosted by Uppsala University (the service is called 'UPPMAX cloud') and Umeå University (north, HPC2N).

"},{"location":"cluster_guides/uppmax_cloud/#history-of-dis","title":"History of Dis","text":"

The UPPMAX cloud 'Dis' (the Swedish word for 'haze'), the successor of 'Smog' (Swedish for 'smog'), was introduced in October 2017 and upgraded during 2020.

"},{"location":"cluster_guides/uppmax_cloud/#apply-for-an-scc-project","title":"Apply for an SCC project","text":"

See the UPPMAX pages on 'Apply for an SCC project'.

"},{"location":"cluster_guides/uppmax_cloud/#technical-specifications","title":"Technical specifications","text":"
  • 40 compute nodes, 24 dedicated for NAISS and 16 for local projects. Each compute node is equipped with 128-256 GB memory and dual E5-2660 CPUs at 2.2 GHz, for a total of 16 cores per compute node
  • VM flavors for small (2 vCPUs) up to large (16 vCPUs) compute allocations
  • 250 TB of total volume storage.
  • Interconnect is 10GbE.

Object storage is planned for 2021 but currently unavailable.

"},{"location":"cluster_guides/uppmax_cluster/","title":"The UPPMAX clusters","text":"

UPPMAX is an organization that provides HPC clusters.

Where can I find an overview of UPPMAX?

One can find an overview of UPPMAX here.

Where can I find an overview of UPPMAX's systems?

One can find an overview of UPPMAX's systems here.

After an overview of the different UPPMAX clusters, we discuss what a computer cluster is, how it differs from a supercomputer, which restrictions apply to a computer cluster, and which additional restrictions apply to a sensitive data computer cluster.

This is followed by a detailed technical summary of the clusters and a detailed overview of the clusters.

"},{"location":"cluster_guides/uppmax_cluster/#overview-of-uppmax-clusters","title":"Overview of UPPMAX clusters","text":"

UPPMAX clusters are computing systems, i.e. they allow a user to do heavy computational calculations.

All UPPMAX clusters are named after Tintin characters. UPPMAX has, among others, the following clusters:

  • Bianca: for sensitive data, general use. In the near future, it will be replaced by Maja
  • Rackham: regular data, general purpose. From 2025-01-01, only for UU staff. In the near future, it will be replaced by Pelle
  • Snowy: regular data, long runs and GPUs

Another cluster UPPMAX is involved in:

  • Dardel: a general purpose HPC cluster in Stockholm. Consider moving your files to it already
flowchart TD\n    UPPMAX(Which UPPMAX cluster?)\n    Bianca\n    Dardel\n    Maja\n    Pelle\n    Rackham\n    Snowy\n    is_sensitive[Do you use sensitive data?]\n    is_long[Do you use long runs and/or GPUs?]\n\n    UPPMAX --> is_sensitive\n    is_sensitive --> |yes|Bianca\n    is_sensitive --> |no|is_long\n    is_long --> |no|Rackham\n    is_long --> |yes|Snowy\n    Bianca --> |near future| Maja\n\n    Rackham --> |not UU, before 2025-01-01| Dardel\n    Rackham --> |UU, near future| Pelle

All UPPMAX clusters use the same file system layout, with special folders. See the UPPMAX page on its file systems here.

"},{"location":"cluster_guides/uppmax_cluster/#what-is-a-computer-cluster-technically","title":"What is a computer cluster technically?","text":"

A computer cluster is a machine that consists of many computers. These computers work together.

Each computer of a cluster is called a node.

There are three types of nodes:

  • login nodes: nodes where a user enters and interacts with the system
Logging in

Logging in is described separately per cluster:

  • Bianca.
  • Rackham.
  • Snowy.
  • calculation nodes: nodes that do the calculations
Requesting a calculation to run

Requesting a calculation to run is described here. This is done by using the Slurm scheduler.
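As an illustration, here is a minimal Slurm job script sketch; the account name is a hypothetical placeholder and the options vary per cluster:

#!/bin/bash
#SBATCH -A naiss2024-1-123   # hypothetical project/account
#SBATCH -n 1                 # one task
#SBATCH -t 01:00:00          # one hour of wall time
echo Hello from $(hostname)

Submit it with sbatch my_job.sh and follow it in the queue with squeue -u $USER.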

  • interactive nodes: a type of calculation node, where a user can do calculations directly
Requesting an interactive node

Requesting an interactive node is described per cluster:

  • Bianca
  • Rackham

This is done by requesting an interactive node from the Slurm scheduler.
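As an illustration, a sketch using UPPMAX's interactive command (the account name is a hypothetical placeholder):

interactive -A naiss2024-1-123 -n 1 -t 01:00:00   # one core for one hour, then work directly on the node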

Each node contains several CPU/GPU cores, RAM and local storage space.

A user logs in to a login node via the Internet.

flowchart TD\n\n  login_node(User on login node)\n  interactive_node(User on interactive node)\n  computation_node(Computation node)\n\n  login_node --> |move user, interactive|interactive_node\n  login_node --> |submit jobs, sbatch|computation_node\n  computation_node -.-> |can become| interactive_node

The different types of nodes an UPPMAX cluster has.

"},{"location":"cluster_guides/uppmax_cluster/#difference-between-a-supercomputer-and-a-high-performing-computer-cluster","title":"Difference between a supercomputer and a (high-performing) computer cluster","text":"

A supercomputer is a machine that is optimized for doing calculations quickly. For example, to predict tomorrow's weather, the calculation must not take a week. The image above is a supercomputer.

A computer cluster is a set of computers that work together so that they can be viewed as a single system. The image above shows a home-made computer cluster. This home-made computer cluster may not be suitable for high-performance computing.

The image above shows Rackham, another UPPMAX computer cluster, suitable for high-performance computing. This makes Rackham a high-performance computing (HPC) cluster. Bianca and Rackham are HPC clusters.

When using this definition:

a supercomputer is one big computer, while high-performance computing is many computers working toward the same goal

Frank Downs

one could conclude that an UPPMAX HPC cluster can be used as a supercomputer when a user runs a calculation on all nodes.

"},{"location":"cluster_guides/uppmax_cluster/#restrictions-on-a-computer-cluster","title":"Restrictions on a computer cluster","text":"

A computer cluster is a group of computers that can run many calculations, as requested by multiple people, at the same time.

To ensure fair use of this shared resource, regular users are restricted in some ways:

  • Users cannot run calculations directly. Instead, users need to request either (1) a calculation to be run, or (2) an interactive node
Requesting a calculation to run

Requesting a calculation to run is described here. This is done by using the Slurm scheduler.

Requesting an interactive node

Requesting an interactive node is described per cluster:

  • Bianca
  • Rackham

This is done by requesting an interactive node from the Slurm scheduler.

  • Users cannot install software directly. Instead, users need to use pre-installed software or learn techniques for running custom software anyway
Using pre-installed software

Using pre-installed software is described here. This is done by using the module system.
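As an illustration, a minimal sketch of the module commands (the bioinfo-tools module is named elsewhere in this documentation; the listing commands are standard):

module avail              # list the available modules
module load bioinfo-tools
module list               # show the currently loaded modules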

How to run custom software

Using a Singularity container allows you to run most custom software on any HPC cluster

These restrictions apply to most general-purpose clusters and all UPPMAX clusters.

"},{"location":"cluster_guides/uppmax_cluster/#restrictions-on-a-sensitive-data-computer-cluster","title":"Restrictions on a sensitive data computer cluster","text":"

In addition to the general restrictions above, a sensitive data cluster has additional restrictions.

Here is an overview of which clusters are designed for sensitive data:

| Cluster name | Sensitive data yes/no? |
| --- | --- |
| Bianca | Yes |
| Rackham | No |
| Snowy | No |

On a sensitive data cluster, the (sensitive) data must remain on the cluster, which leads to these additional restrictions for users:

  • Users have no direct access to the internet. Instead, users can up/download files from/to a special folder.
File transfer

Transferring files is described per sensitive data cluster:

  • Bianca.

The goal is to prevent the accidental up/download of sensitive data. As these up/downloads are monitored, in case of an accident, the extent of the leak and the person (accidentally) causing it are known. Identifying a responsible person in case of such an accident is required by law.

"},{"location":"cluster_guides/uppmax_cluster/#uppmax-clusters-technical-summary","title":"UPPMAX clusters technical summary","text":"

This is a technical summary of the UPPMAX clusters:

| . | Rackham | Snowy | Bianca |
| --- | --- | --- | --- |
| Purpose | General-purpose | General-purpose | Sensitive |
| # Intel CPU Nodes | 486+144 | 228 | 288 |
| # GPU Nodes | - | 50, Nvidia T4 | 10, 2x Nvidia A100 each |
| Cores per node | 20/16 | 16 | 16/64 |
| Memory per node | 128 GB | 128 GB | 128 GB |
| Fat nodes | 256 GB & 1 TB | 256, 512 GB & 4 TB | 256 & 512 GB |
| Local disk (scratch) | 2/3 TB | 4 TB | 4 TB |
| Login nodes | Yes | No (reached from Rackham) | Yes (2 cores and 15 GB) |
| "Home" storage | Domus | Domus | Castor/Cygnus |
| "Project" storage | Crex, Lutra | Crex, Lutra | Castor/Cygnus |
"},{"location":"cluster_guides/uppmax_cluster/#detailed-overview-of-the-uppmax-systems","title":"Detailed overview of the UPPMAX systems","text":"
\n  graph TB\n\n  Node1 -- interactive --> SubGraph2Flow\n  Node1 -- sbatch --> SubGraph2Flow\n  subgraph \"Snowy\"\n  SubGraph2Flow(calculation nodes)\n        end\n\n        thinlinc -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow\n        terminal -- usr --> Node1\n        terminal -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow\n        Node1 -- usr-sensXXX + 2FA + no VPN ----> SubGraph1Flow\n\n        subgraph \"Bianca\"\n        SubGraph1Flow(Bianca login) -- usr+passwd --> private(private cluster)\n        private -- interactive --> calcB(calculation nodes)\n        private -- sbatch --> calcB\n        end\n\n        subgraph \"Rackham\"\n        Node1[Login] -- interactive --> Node2[calculation nodes]\n        Node1 -- sbatch --> Node2\n        end
"},{"location":"cluster_guides/uppmax_filesystem/","title":"UPPMAX filesystem","text":"

One can store files on the UPPMAX clusters.

Here we show some common directories and best practices.

| Directory name | Description |
| --- | --- |
| backup | A folder that is guaranteed to have a backup for 30 days |
| Home folder | Your home folder, /home/[username], e.g. /home/sven |
| nobackup | A folder without a backup |
| Project folder | Your project folder, /proj/[project_name], e.g. /proj/snic2021-22-780 |
| Wharf | A Bianca-only folder for file transfer |
"},{"location":"cluster_guides/uppmax_filesystem/#best-practices","title":"Best practices","text":"Are there any horror stories about this?

Yes, ask the UPPMAX staff :-)

  1. Keep an inventory of important data and make a plan for how it should be treated. Inform collaborators of this plan.
  2. Make sure you keep a separate copy of the most important data.
  3. Put important data in a backed up directory (and nothing else, so that the backup system does not get bogged down with junk).
  4. Run chmod -R -w . inside directories containing critical data that should normally be preserved.
"},{"location":"cluster_guides/uppmax_history/","title":"UPPMAX history","text":"Resource Out of commission Size Price Start End Features Grendel 2004-06-30 ?16 nodes - ? ? Together with NSC Ngorongoro 2008-12-31 48 CPUs - ? ? SunFire 15k Hagrid 2008-01-31 100 nodes - 2003-12-01 2007-12-31 SNIC/SweGrid Ra 2009-08-03 100 nodes - 2005-02-01 2009-01-31 SNIC/matvet Set 2010-12-31 10 nodes 0.9 2006-07-01 2010-06-30 SNIC, power5+IB Isis 2010-12-31 200 nodes 4.49 2007-02-01 2010-01-31 SNIC/matvet Os 2010-12-31 10 nodes 0 2007-02-01 2011-12-31 SNIC, IB Grad 2013-01-31 64 nodes - 2008-04-01 2012-03-31 SNIC/SweGrid Cell 2012-01-31 2 nodes - 2008-09-01 2009-08-31 2 nodes with cell-processors Kalkyl 2013-12-31 348 nodes 8.6 2009-12-12 2013-12-31 KAW/SNIC Bubo 2013-12-09 500TB 5.3 2009-09-12 2013-12-01 KAW/SNIC H\u00f6keborg . ca 230 m2, 7 cooling aggregates, 90 kVA UPS, racks A-D - 2011-09-01 - Computer hall, faculty means H\u00f6keborg . +3 cooling aggregates, racks E-F - 2013-06-01 - Computer hall, faculty means H\u00f6keborg . +3 cooling aggregates, +30kVA UPS, moved batteries, racks G-H - 2015-04-15 - Computer hall, faculty means Lynx 2015-11-09 500 TB 4.7 2011-12-01 2015-12-01 KAW/SNIC Halvan 2016-04-06 64 core, 2TB 1.2 2011-02-11 2016-02-29 Misc, extended support 1 year Tintin . 164 nodes 5.3 2012-02-01 2016-02-01 SNIC Kali . 1 nod, 30TB disk 0.1 X X+1 year iRODS, KAW? dCache . 600 TB 0.6 2012-11-19 2016-11-18 SNIC Gulo . 1.2 PB 1.9 2012-11-19 2016-11-18 KAW/BILS Pica . 5.5 PB 10 2013-10-01 2017-10-01 KAW Host . 8 nodes 0.45 2013-11-01 2017-11-01 Used Ganeti, UPPMAX Milou . 248 nodes 9.9 2013-11-01 2017-11-01 KAW/BILS Milou-f2 . 1 nod, 4 TB 1 2014-02-01 2018-02-01 Login node Nestor 2016-05-31 48 nodes - 2014-04-08 2018-04-08 . Apus 2016-05-31 500 TB - 2014-01-13 2018-01-13 . Topolino . 24 nodes - 2014-04-08 2018-04-08 BILS Meles . 279 TB - 2014-01-13 2018-01-13 . Das . 48 TB 0.07 2015-07-01 2020-06-30 New back mount, HP, data network redesign IT 2015/25 Core network . 2 switches 0.22 2015-07-01 2020-06-30 Dell, core network, data network redesign IT 2015/50 Irma . 250 nodes 15.8 2015-10-01 2019-09-30 Supermicro, data network redesign IT2014/93 Lupus . 1 PB Lustre 2.1 2016-03-03 2021-03-02 Dell, data network redesign IT214/92 CEPH . 252 TB 0.35 2015-12-14 2019-12-13 Dell, 7 servers, data network redesign IT 2015/84 Bianca . 100 nodes 3.1 2016-04-01 2020-03-31 SouthPole, Huawei data network redesign IT 2015/65 Castor . 1 PB, 18 servers 1 2016-04-01 2020-03-31 SouthPole, Huawei data network redesign IT 2015/65 Castor, +1 PB . 1 PB, 18 servers 2.3 2016-07-01 2020-07-31 SouthPole, Huawei data network redesign IT 2015/65 Grus . 1.5 PB, 14 servers 1.8? 2016-07-01 2020-07-31 SouthPole, Huawei data network redesign IT 2015/65 Irham . . . 2016-07-01 2024-01-12 Decommissioned Irma nodes added to Rackham, became r[1001-1072,1179-1250] Miarka . . . 2021 . . Rackham . . . . . . Snowy . . . . . . Pelle . . . . . . Maja . . . . . . Gorilla . . . . . .
  • Price is in millions of Swedish kronor
  • 'Start': start of the guarantee
  • 'End': end of the guarantee
  • 'data network redesign' is assumed to be the unabbreviated form of dnr
  • 'processors' is assumed to be the unabbreviated form of procs
"},{"location":"cluster_guides/uppmax_storage_system/","title":"UPPMAX storage system","text":"

A system to store data on.

  • Castor
  • Crex
  • Cygnus
  • Domus
  • Lutra
  • Spirula
  • Vulpes
"},{"location":"cluster_guides/uppmax_systems/","title":"UPPMAX systems","text":"

UPPMAX is an organization that provides HPC infrastructure that is physically located in Uppsala.

Where can I find an overview of UPPMAX?

One can find an overview of UPPMAX here.

This HPC infrastructure consists of:

  • Computing systems, to do calculations
  • Storage systems, to store data
  • Cloud services, to provide webservices

These systems are discussed below.

"},{"location":"cluster_guides/uppmax_systems/#uppmax-computing-systems","title":"UPPMAX computing systems","text":"

Computing systems allow a user to do heavier computational calculations.

UPPMAX has, among others, the following clusters:

  • Rackham: regular data, general purpose
  • Snowy: regular data, long runs and GPUs
  • Bianca: for sensitive data, general use

A technical summary can be found below.

flowchart TD\n    UPPMAX(Which UPPMAX cluster?)\n    Bianca\n    Rackham\n    Snowy\n    is_sensitive[Do you use sensitive data?]\n    is_long[Do you use long runs and/or GPUs?]\n\n    UPPMAX --> is_sensitive\n    is_sensitive --> |yes|Bianca\n    is_sensitive --> |no|is_long\n    is_long --> |no|Rackham\n    is_long --> |yes|Snowy
"},{"location":"cluster_guides/uppmax_systems/#uppmax-storage-systems","title":"UPPMAX storage systems","text":"

Storage systems allow a user to store (large amounts of) data, for either active use (i.e. in calculations) or to archive it (cold data).

You are not supposed to do calculations on the cold data. It is stored on off-load storage, where the file system is much slower. You need to transfer the data to active storage first.

The UPPMAX storage systems are:

  • Active: Cygnus for Bianca, Crex for Rackham
  • Off-load: Lutra for Rackham
flowchart TD\n    UPPMAX[Which UPPMAX storage system?]\n    which_cluster[Which UPPMAX cluster?]\n    Cygnus\n    Lutra\n    usage_type{Type of use?}\n\n    UPPMAX-->which_cluster\n    which_cluster-->|Rackham|usage_type\n    which_cluster-->|Bianca|Cygnus\n    usage_type-->|active|Crex\n    usage_type-->|off-load|Lutra

See here for more information.

"},{"location":"cluster_guides/uppmax_systems/#uppmax-cloud-services","title":"UPPMAX Cloud services","text":"

See the UPPMAX cloud.

"},{"location":"cluster_guides/uppmax_systems/#difference-between-supercomputer-and-high-performing-computer-cluster","title":"Difference between supercomputer and (high-performing) computer cluster","text":"

A supercomputer is a machine that is optimized for doing calculations quickly. For example, to predict tomorrow's weather, the calculation must not take a week. The image above is a supercomputer.

A computer cluster is a machine that is optimized for doing a lot of calculations. The image above shows a home-made computer cluster. This home-made computer cluster may not be suitable for high-performance computing.

The image above shows Rackham, another UPPMAX computer cluster, suitable for high-performance computing. This makes Rackham a high-performance computing (HPC) cluster. Bianca and Rackham are HPC clusters.

"},{"location":"cluster_guides/uppmax_systems/#restrictions-on-a-computer-cluster","title":"Restrictions on a computer cluster","text":"

A computer cluster is a group of computers that can run many calculations, as requested by multiple people, at the same time.

To ensure fair use of this shared resource, regular users are restricted in some ways:

  • Users cannot run calculations directly. Instead, users need to request either (1) a calculation to be run, or (2) an interactive node
Requesting a calculation to run

Requesting a calculation to run is described here. This is done by using the Slurm scheduler.

Requesting an interactive node

Requesting an interactive node is described here. This is done by requesting an interactive node from the Slurm scheduler.

  • Users cannot install software directly. Instead, users need to use pre-installed software or learn techniques for running custom software anyway
Using pre-installed software

Using pre-installed software is described here. This is done by using the module system.

How to run custom software

One can use Singularity containers to run software on an HPC cluster.

These restrictions apply to most general-purpose clusters. However, Bianca is a sensitive data cluster, to which more restrictions apply.

"},{"location":"cluster_guides/uppmax_systems/#restrictions-on-a-sensitive-data-computer-cluster","title":"Restrictions on a sensitive data computer cluster","text":"

In addition to the general restrictions above, Bianca is also a sensitive data cluster. Its sensitive data must remain on Bianca, which leads to these additional restrictions for users:

  • Users have no direct access to the internet. Instead, users can up/download files from/to a special folder.
File transfer

Transferring files is described here.

The goal is to prevent the accidental up/download of sensitive data. As these up/downloads are monitored, in case of an accident, the extent of the leak and the person (accidentally) causing it are known. Identifying a responsible person in case of such an accident is required by law.

"},{"location":"cluster_guides/uppmax_systems/#what-is-a-computer-cluster-technically","title":"What is a computer cluster technically?","text":"

A computer cluster is a machine that consists of many computers. These computers work together.

Each computer of a cluster is called a node.

There are three types of nodes:

  • login nodes: nodes where a user enters and interacts with the system
Logging in

Logging in is described here.

  • calculation nodes: nodes that do the calculations
Requesting a calculation to run

Requesting a calculation to run is part of this course and is described here. This is done by using the Slurm scheduler.

  • interactive nodes: a type of calculation node, where a user can do calculations directly
Requesting an interactive node

Requesting an interactive node is part of this course and is described here. This is done by requesting an interactive node from the Slurm scheduler.

Each node contains several CPU/GPU cores, RAM and local storage space.

A user logs in to a login node via the Internet.

"},{"location":"cluster_guides/uppmax_systems/#summary","title":"Summary","text":"

keypoints

  • NAISS provides HPC resources for Swedish research.
  • UPPMAX takes care of the Uppsala HPC facilities
  • Bianca is an HPC cluster for sensitive data
  • The restrictions on Bianca follow from Bianca being a shared resource that uses sensitive data
"},{"location":"cluster_guides/uppmax_systems/#extra-material","title":"Extra material","text":""},{"location":"cluster_guides/uppmax_systems/#uppmax-clusters-technical-summary","title":"UPPMAX clusters technical summary","text":"Rackham Snowy Bianca Purpose General-purpose General-purpose Sensitive # Intel CPU Nodes 486+144 228 288 # GPU Nodes - 50, Nvidia T4 10, 2x Nvidia A100 each Cores per node 20/16 16 16/64 Memory per node 128 GB 128 GB 128 GB Fat nodes 256 GB & 1 TB 256, 512 GB & 4 TB 256 & 512 GB Local disk (scratch) 2/3 TB 4 TB 4 TB Login nodes Yes No (reached from Rackham) Yes (2 cores and 15 GB) \"Home\" storage Domus Domus Castor \"Project\" Storage Crex, Lutra Crex, Lutra Castor"},{"location":"cluster_guides/uppmax_systems/#detailed-overview-of-the-uppmax-systems","title":"Detailed overview of the UPPMAX systems","text":"
\n  graph TB\n\n  Node1 -- interactive --> SubGraph2Flow\n  Node1 -- sbatch --> SubGraph2Flow\n  subgraph \"Snowy\"\n  SubGraph2Flow(calculation nodes)\n        end\n\n        thinlinc -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow\n        terminal -- usr --> Node1\n        terminal -- usr-sensXXX + 2FA + VPN ----> SubGraph1Flow\n        Node1 -- usr-sensXXX + 2FA + no VPN ----> SubGraph1Flow\n\n        subgraph \"Bianca\"\n        SubGraph1Flow(Bianca login) -- usr+passwd --> private(private cluster)\n        private -- interactive --> calcB(calculation nodes)\n        private -- sbatch --> calcB\n        end\n\n        subgraph \"Rackham\"\n        Node1[Login] -- interactive --> Node2[calculation nodes]\n        Node1 -- sbatch --> Node2\n        end
"},{"location":"cluster_guides/webexport/","title":"Webexport guide","text":"

You can enable webexport by creating a publicly readable folder called webexport in your project directory (/proj/[project id]). The contents of that folder will be accessible through https://export.uppmax.uu.se/[project id]/.

This will not work on Bianca for security reasons.

"},{"location":"cluster_guides/webexport/#publicly-readable-folder","title":"Publicly readable folder","text":"
  • A publicly readable folder has the execute permission set for \"other\" users.
  • Run the command chmod o+x webexport to ensure that the webexport directory has the correct permissions.
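Putting the two bullets above together, a minimal sketch, reusing the hypothetical project id naiss2024-1-123 from the access-control example below:

cd /proj/naiss2024-1-123
mkdir -p webexport
chmod o+x webexport
# files in webexport/ become reachable at https://export.uppmax.uu.se/naiss2024-1-123/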
"},{"location":"cluster_guides/webexport/#control-access","title":"Control access","text":"
  • A subset of .htaccess/.htpasswd functionality is available to control access.

  • Example:

    • /crex/proj/naiss2024-1-123/webexport/Project_portal/.htaccess
    • /crex/proj/naiss2024-1-123/Nisse/.htpasswd

    • Note that you need the full physical /crex/proj... path. This full path is given by the command pwd -P. (A sketch follows below.)
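A hedged sketch of setting this up with Apache-style tools, using the example paths above; the user name nisse and the exact subset of supported directives are assumptions:

htpasswd -c /crex/proj/naiss2024-1-123/Nisse/.htpasswd nisse   # create the password file; prompts for a password
cat > /crex/proj/naiss2024-1-123/webexport/Project_portal/.htaccess << 'EOF'
AuthType Basic
AuthName "Project portal"
AuthUserFile /crex/proj/naiss2024-1-123/Nisse/.htpasswd
Require valid-user
EOF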

see also

You may want to check the external Easy_webshare_on_UPPMAX while we update this page.

"},{"location":"cluster_guides/wharf/","title":"wharf","text":"

wharf is a folder on Bianca used for file transfer.

Here we describe:

  • What is wharf?
  • The wharf location
  • wharf use
  • mounting wharf
"},{"location":"cluster_guides/wharf/#what-is-wharf","title":"What is wharf?","text":"

The wharf is like a \"postbox\" for data/file exchange between the Internet restricted Bianca cluster and the remaining of the World Wide Internet. This \"postbox\" is reachable to transfer data from two internal servers - bianca-sftp.uppmax.uu.se and transit.uppmax.uu.se.

"},{"location":"cluster_guides/wharf/#the-wharf-location","title":"The wharf location","text":"

The path to this special folder is:

/proj/nobackup/[project_id]/wharf/[user_name]/[user_name]-[project_id]\n

where

  • [project_id] is the ID of your NAISS project
  • [user_name] is the name of your UPPMAX user account

For example:

/proj/nobackup/sens2023598/wharf/sven/sven-sens2023598\n
"},{"location":"cluster_guides/wharf/#wharf-use","title":"wharf use","text":"

To transfer data from/to Bianca, wharf is the folder where files are sent to/from.

Do not keep files in wharf, as this folder is connected to the outside world and hence is a security risk. Instead, move your data to your project folder.

You have full access to your wharf and read-only access to other users' wharf folders in that same project.

wharf is only accessible when inside the university networks.

"},{"location":"cluster_guides/wharf/#mounting-wharf","title":"Mounting wharf","text":"

Mounting wharf means that a wharf folder is added to the filesystem of your local computer, after which you can use it like any other folder. The data shown in the folder is on Bianca, not on your local storage.

One can mount wharf on your local computer using sshfs when inside the university networks. sshfs is available on most Linux distributions:

| Distribution | Package name |
| --- | --- |
| Ubuntu | sshfs |
| Fedora | fuse-sshfs |
| RHEL7/CentOS7 [1] | fuse-sshfs |
| RHEL8 [2] | fuse-sshfs |
| CentOS8 [3] | fuse-sshfs |
  • [1] Enable EPEL repository
  • [2] Enable codeready-builder repository
  • [3] Enable powertools repository

UPPMAX does not have sshfs installed for security reasons.
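A hedged sketch of such a mount from a Linux machine inside the university networks, reusing the hypothetical user sven and project sens2023598 from above; the login format is assumed to follow the Bianca SFTP server convention:

mkdir -p ~/wharf
sshfs sven-sens2023598@bianca-sftp.uppmax.uu.se:sven/sven-sens2023598 ~/wharf
# ... use ~/wharf like any other folder ...
fusermount -u ~/wharf   # unmount when done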

"},{"location":"courses_workshops/R_matlab_julia/","title":"Introduction to running Julia, R, and Matlab in HPC","text":"

Learn how to run R, Matlab, and Julia at Swedish HPC centres. We will show you how to find and load the needed modules, how to write a batch script, as well as how to install and use your own packages, and more. The course will consist of lectures interspersed with hands-on sessions where you get to try out what you have just learned.

We will mainly use Tetralith at NSC for the examples for the course, but there is little difference in how you use the various HPC centres in Sweden and you should have no problems applying the knowledge to the other systems.

NOTE: the course will NOT cover the topic of improving your programming skills in R, Matlab, and Julia. Likewise, we will not cover advanced techniques for code optimization.

NOTE if you are interested in running Python at Swedish HPC centres, then we recommend the course \"Introduction to Python and Using Python in an HPC environment\" which will run 24-25 April + 28-29 April. The first day is the introduction to Python and it is possible to just participate that day.

Remote/online participation: The course will be completely online and we will use Zoom. More information about connecting and such will be sent to the participants close to the course.

Prerequisites: some familiarity with the Linux command line (recordings from HPC2N's Linux intro here and UPPMAX Linux intro here and also here), and basic R, Matlab, or Julia, depending on which language(s) you are interested in. See below for links to useful material if you need a refresher before the course.

"},{"location":"courses_workshops/R_matlab_julia/#schedule","title":"Schedule","text":"

This course will consist of three days (9:00-16:00), one for each language. It is a cooperation between HPC2N, LUNARC, and UPPMAX.

Full schedule can be found on the rendered presentations for each course day: https://uppmax.github.io/R-python-julia-matlab-HPC/

  • Day 1, Mon. 24. March

    • 9:00 - 16:00 R
  • Day 2, Tue. 25. March

    • 9:00 - 16:00 Matlab
  • Day 3, Wed. 26. March

    • 9:00 - 16:00 Julia
"},{"location":"courses_workshops/R_matlab_julia/#materials","title":"Materials","text":"
  • Exercises and .rst files can be downloaded from the course's GitHub page: <https://github.com/UPPMAX/R-python-julia-matlab-HPC>
  • Rendered presentations can be found here: <https://uppmax.github.io/R-python-julia-matlab-HPC/>
  • Recordings: TBA
  • Q/A document for each day, as PDF: TBA
"},{"location":"courses_workshops/R_matlab_julia/#links-to-refresher-material","title":"Links to refresher material","text":"

This is NOT in any way mandatory for participation or part of the course. It is a list of links to useful refresher material for those who would like to read up on Julia/R/Matlab/Linux/etc. before the course.

Julia
  • Aalto Univ.: <https://github.com/AaltoRSE/julia-introduction>
  • Software Carpentry: <https://carpentries-incubator.github.io/julia-novice/>
R
  • Software Carpentry: <https://swcarpentry.github.io/r-novice-gapminder/index.html>
  • Parallel R: <https://github.com/menzzana/parallel_R_course>
Matlab
  • Software Carpentry: <https://swcarpentry.github.io/matlab-novice-inflammation/>
  • Matlab documentation at MathWorks: <https://se.mathworks.com/help/matlab/index.html>
Linux intro
  • Linux intro from "Introduction to Kebnekaise": <https://hpc2n.github.io/intro-linux/> (Recordings)
  • Material contained in the UPPMAX introduction course: <https://www.uu.se/centrum/uppmax/utbildning/kurser-och-workshops/introduktion-till-uppmax>
Slurm
  • Contained in the "Introduction to Kebnekaise" course: <https://hpc2n.github.io/intro-course/batch/> (Recordings)
  • UPPMAX SLURM guide: <https://docs.uppmax.uu.se/cluster_guides/slurm/>
  • Material contained in the UPPMAX intro course: <https://www.uu.se/en/centre/uppmax/study/courses-and-workshops/introduction-to-uppmax>

Time and Dates: 24-26 March 2025, three days, one for each language. 9:00 - 16:00 each day. The last hour each day will be used for extra time for exercises.

Onboarding: Friday, 21. March (1 hour - time to be decided)

Location: ONLINE. Zoom link will be sent to participants a few days before the course.

Deadline for registration: 17. March 2025

Registration from HPC2N page

Participation in the course is free.

Please make sure you have an account at SUPR as well as at NSC if you want to participate in the hands-on part of the training. There will be a course project at NSC that can be used to run the examples in during the hands-on. If you are affiliated with IRF, LTU, UMU, MIUN, or SLU and have an account/project at HPC2N, you can use HPC2N's local cluster if you prefer. Also, if you have an account/project at LUNARC or one at UPPMAX, you may use that instead if you want. If you do not have an account at SUPR and/or UPPMAX/HPC2N/LUNARC/NSC, you will be contacted with further instructions on how to create those. You are STRONGLY encouraged to sign up to SUPR as soon as possible after registering for the course.

NOTE:

  • Kebnekaise has become a local resource. Please also read the page about "Kebnekaise will be retired as a national resource". HPC2N accounts are ONLY meant for people who are at Umeå University, one of HPC2N's partner sites (IRF, LTU, MIUN, SLU), or are in a research group with a PI at one of those.
  • Cosmos (LUNARC) is also a local resource, for those at Lund University.
  • UPPMAX accounts are only for local Uppsala people.
  • Everyone else must use NSC for the course.

Course project: As part of the hands-on, you may be given temporary access to a course project, which will be used for running the hands-on examples. There are some policies regarding this that we ask you to follow:

  • You may be given access to the project before the course; please do not use the allocation for running your own codes. Usage of the project before the course means the priority of jobs submitted to it goes down, diminishing the opportunity for you and your fellow participants to run the examples during the course. You can read more detailed information about the job policies of NSC here and the NSC usage rules here.
  • The course project will be open 1-2 weeks after the course, giving the participants the opportunity to test run examples and shorter codes related to the course. During this time, we ask that you only use it for running course-related jobs. Use your own discretion, but it could be: (modified) examples from the hands-on, short personal codes that have been modified to test things learned at the course, etc.
  • Anyone found to be misusing the course project, using up large amounts of the allocation for their own production runs, will be removed from the course project.
  • You will likely also be given access to a storage area connected to the compute project. Any data you store there should be course-related, and if you wish to save it you should copy it to somewhere else soon after the course, as it will be deleted about a month later.

The course uses compute resources provided by the National Academic Infrastructure for Supercomputing in Sweden (NAISS) at NSC partially funded by the Swedish Research Council through grant agreement no. 2022-06725.

"},{"location":"courses_workshops/awk/","title":"Awk workshop","text":"

AWK is an interpreted programming language designed for text processing and typically used as a data extraction and reporting tool.

This two-day workshop aims to promote and demonstrate the flexibility of the tool in cases where the overhead of more sophisticated approaches and programming languages is not worth the bother.

"},{"location":"courses_workshops/awk/#learn-how-to","title":"Learn how to","text":"
  • use Awk as an advanced grep command, capable of arithmetic selection rules with control over the content of the matched lines (see the sketch after this list)
  • perform simple conversions, analyses or filtering of your data on the fly, making it easy to plot or read the data in your favorite research tool
  • handle, and take advantage of, data split over multiple-file data sets
  • use Awk as a simple function or data generator
  • perform simple sanity checks on your results
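As a taste of the first bullet above, here is a minimal one-liner sketch; data.txt and its column layout are hypothetical:

# print columns 1 and 3 of every line whose third column exceeds 0.5
awk '$3 > 0.5 { print $1, $3 }' data.txt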
"},{"location":"courses_workshops/awk/#awk-for-bioinformaticians","title":"Awk for bioinformaticians","text":"

Use what you learn and dive into the basic concepts of bioinformatics with simple exercises on typical scientific problems and tasks.

Venue and registration:

  • Date: 16 and 17 January 2025
  • Time: 9:15 - 12:00 and 13:15 - 16:00
  • Location: Zoom; the link will be sent to applicants
  • Application: form.

"},{"location":"courses_workshops/awk/#schedule","title":"Schedule","text":""},{"location":"courses_workshops/awk/#1-st-day-915-1200","title":"1-st day 9:15 - 12:00","text":"

Seminar session

  • Examples of typical problems suitable for Awk “treatment”
  • Introduction to the basics of Awk scripting language
  • Solving interactively simple problems
"},{"location":"courses_workshops/awk/#1-st-day-lunch-break","title":"1-st day lunch break","text":"

Exercises 13:15 - 16:00

  • Solving interactively the exercise problems
"},{"location":"courses_workshops/awk/#2-nd-day-915-1200","title":"2-nd day 9:15 - 12:00","text":"
  • Awk for bioinformaticians - seminar
  • Case Study: Manipulating the output from a genome analysis - vcf and gff
  • Filtering and formatting raw data
  • Counting and piling features
  • Indexing and hashing to compare variants and annotations
"},{"location":"courses_workshops/awk/#2-nd-day-lunch-break","title":"2-nd day lunch break","text":"

Walk-through session on various topics:

  • Awk parsing multiple input files “simultaneously”
  • Multiple input files - a second-approach scenario will be discussed.
  • How to trick awk into accepting options on the command line like a regular program, i.e. $ script.awk filename parameter1 parameter2 - link
  • Declaring and calling functions in awk - link
  • Input/output to/from external programs
  • Learn how to send input to an external program (which might be based on your data) and read the result back - link
  • Handy tips: awk one-liners, use with Vim, gnuplot…

Also: suggest a topic for discussion or see recently suggested topics.

"},{"location":"courses_workshops/awk/#prerequisites","title":"Prerequisites","text":""},{"location":"courses_workshops/awk/#macos","title":"MacOS","text":"

The system-provided awk version will work for most of the examples during the workshop, with a few exceptions, which are noted in the online material.

The tilde (~) sign on a Mac with the Swedish keyboard layout: Alt + ^

"},{"location":"courses_workshops/awk/#linux","title":"Linux","text":"

Several distributions have other awk flavors installed by default. The easiest fix is to install the GNU version, gawk, e.g. for Ubuntu: sudo apt install gawk

"},{"location":"courses_workshops/awk/#windows-1011","title":"Windows 10/11","text":"
  • Ubuntu for Windows 10 - it is better to read from the source, although it might not be the easiest tutorial. In my experience, this is the best Linux environment without virtualization.
  • MobaXterm: use the internal package manager to install gawk. The default awk is provided by Busybox and is not enough for the purpose of the workshop.
"},{"location":"courses_workshops/awk/#linux-computer-center","title":"Linux computer center","text":"
  • Just log in to your account and use the provided awk - any version newer than 4 will work.
rackham3:[~] awk -V
GNU Awk 4.0.2
Copyright (C) 1989, 1991-2012 Free Software Foundation.
"},{"location":"courses_workshops/awk/#virtual-linux-machine","title":"Virtual Linux Machine","text":"

Just follow a tutorial on how to set up and use a virtual Linux environment.

  • VirtualBox
  • Ubuntu on Public Clouds
  • GitHub & Binder (you need only a browser)
  • Singularity: singularity run shub://pmitev/Teoroo-singularity:gawk 'BEGIN{ for(i=1;i<=4;i++) print i}'
Feedback from previous workshops
  • 2024.08 | 2024.01
  • 2023.09 | 2023.01 (not enough data to be anonymous)
  • 2022.09 | 2022.01
  • 2021.09 | 2021.01
  • 2020.08 | 2020.01
  • 2019.08 | 2019.01
  • 2018.08 | 2018.01
  • 2017.01 | 2017.08
  • 2016.08 | 2016.01
  • 2015.10
"},{"location":"courses_workshops/awk/#contacts-for-the-course","title":"Contacts for the course","text":"

Pavlin Mitev, Jonas Söderberg, Lars Eklund, Richel Bilderbeek (UPPMAX)

"},{"location":"courses_workshops/bianca_intro/","title":"Introduction to Bianca: Handling Sensitive Research Data","text":"

Are you just beginning to work with sensitive data in your research? If yes, welcome to a 1-day introduction to handling sensitive data on the UPPMAX cluster Bianca. We will tell you about NAISS-SENS, how to log in to Bianca, how to transfer files via the wharf, and the basics of the SLURM workload manager and the module system.

This workshop is intended for beginner users of Bianca.

You do not need to be a member of a NAISS-SENS project in order to join the workshop. A SUPR course project will be available to all participants. The workshop will consist of both lectures and exercise sessions.

Prerequisites: none.

When: Wednesday, March 19, 2025.

Time: 09:00 - 12:00, and 13:00 - 16:00.

Where: online via Zoom. Connection details will be sent to registered participants.

Login help session: TBD

Registration form

"},{"location":"courses_workshops/bianca_intro/#content","title":"Content","text":"
  • Introduction
  • Intro to NAISS-Sens
  • Login: ThinLinc
  • Command line intro specific to Bianca
  • Module system
  • Intro to transferring files to and from Bianca
  • Compute nodes and slurm
  • Summary
  • Q/A

Workshop material

"},{"location":"courses_workshops/courses_workshops/","title":"Courses and workshops","text":"

At UPPMAX, we teach by providing workshops and courses. This page gives an overview of these.

Course dates are (or should be) provided at each course's website.

"},{"location":"courses_workshops/courses_workshops/#uppmax-local","title":"UPPMAX Local","text":"

The courses on how to use our local clusters, such as Rackham and Snowy.

  • Introduction to Linux and UPPMAX
"},{"location":"courses_workshops/courses_workshops/#naiss-sens","title":"NAISS-Sens","text":"

The courses on how to use Bianca, a NAISS HPC cluster for sensitive data.

  • Introduction to Bianca: Handling Sensitive Research Data
"},{"location":"courses_workshops/courses_workshops/#naiss-centre-agnostic","title":"NAISS centre agnostic","text":"

UPPMAX is part of NAISS and we do teach things that apply to all NAISS HPC clusters.

"},{"location":"courses_workshops/courses_workshops/#getting-started","title":"Getting started","text":"
  • Connecting and file transfer
"},{"location":"courses_workshops/courses_workshops/#programming","title":"Programming","text":""},{"location":"courses_workshops/courses_workshops/#python","title":"Python","text":"
  • Intro to Python

  • HPC-Python

    • Course material
"},{"location":"courses_workshops/courses_workshops/#other","title":"Other","text":"
  • Introduction to running R, MATLAB, and Julia in HPC
    • Course material
  • Programming formalisms
  • To awk or not
  • Basic Singularity
"},{"location":"courses_workshops/courses_workshops/#other-centers","title":"Other centers","text":"
  • HPC2N courses
  • CodeRefinery workshops
  • ENCCS training events
  • ENCCS Lessons
  • Codecademy
  • Learn X in Y minutes lessons
  • Swedish Science Cloud training
"},{"location":"courses_workshops/intro_to_python/","title":"Introduction to Python","text":"Announcement text

Great course. Now, Python is not scary anymore.

A learner from this course (source)

This 1-day course helps you get started with Python, by working through an online and free book. We make use of HPC clusters to write and run Python code. The pace of this highly interactive course is set by the majority of learners, ensuring that any complete beginner has enough time for exercises. At the end of the day, you should feel comfortable with the basics of Python and familiar enough with a book on Python to help you on your next steps.

  • Course information and registration: https://docs.uppmax.uu.se/workshops_courses/intro_to_python/
  • When: Tuesday March 4th 2025, 9:00-16:00 (course schedule)
  • Where: Online via Zoom
  • Course material: https://uppmax.github.io/uppmax_intro_python/
  • Earlier evaluations of the course: https://uppmax.github.io/uppmax_intro_python/evaluations/
Registration form text

Intro to Python 2025-03-07

This is the registration form for the UPPMAX course 'Intro to Python', https://docs.uppmax.uu.se/courses_workshops/intro_to_python/

What is your email address?

Great course. Now, Python is not scary anymore.

A learner from this course (source)

This 1-day course helps you get started with Python, by working through an online and free book. We make use of the UPPMAX HPC cluster to write and run Python code, but you may use a different machine if you prefer. The pace of this highly interactive course is set by the majority of learners, ensuring that any complete beginner has enough time for exercises. At the end of the day, you should feel comfortable with the basics of Python and familiar enough with a book on Python to help you on your next steps.

You will:

  • Feel comfortable with learning Python
  • Feel comfortable using an online and free book on Python
  • Write Python code on an HPC cluster
  • Run Python scripts on an HPC cluster

Practical matters:

  • Registration form: here
  • When: Tuesday March 4th 2025, 9:00-16:00 (course schedule)
  • Where: Online via Zoom. Zoom room and password will be mailed
  • Course material
  • Earlier evaluations of the course

Before the course, you must have done these four things:

  • Prerequisite 1/4: You have registered at our registration form here

  • Prerequisite 2/4: A user account on a Swedish academic HPC cluster

How can I check if I have this?

When you can login at https://supr.naiss.se/.

It should look similar to this:

How to get this?

Register at https://supr.naiss.se/person/register/.

What if I have a problem here?

Contact richel.bilderbeek@uppmax.uu.se

  • Prerequisite 3/4: Be able to login to an HPC cluster using SSH
How can I check if I have this?

This depends on the HPC cluster you are using. For UPPMAX's Rackham, it looks similar to this:

Is it OK if I can login using other methods?

Probably: yes

  • Using a website: yes
  • Using a local ThinLinc client: yes
How to get this?

Follow the instructions of your favorite HPC center or the UPPMAX instruction

What if I have a problem here?

Contact richel.bilderbeek@uppmax.uu.se

  • Prerequisite 4/4: have a good Zoom setup
How can I check if I have this?
  • You are in a room where you can talk
  • You talk into a (standalone or headset) microphone
  • Others can clearly hear you when you talk
  • Others can see you
  • You can hear others clearly when they talk
How to get this?
  • Find/schedule/book a room where you can talk
  • Buy a simple headset
What if I don't have this? Is that OK?

No.

You will feel left out, as the course is highly interactive. It would be weird to the other learners.

What if I have social anxiety?

Sorry to hear that. In this course, it is OK to give a wrong answer or to say 'I do not know'. This is what a former learner had to say on this:

As a learner, you do not want to be berated when giving an answer. Richel tries to gently deal with a wrong answer and he does this great

A learner from this course (source)

You are welcome to try and leave anytime you want. The course material is made for self-study too, with videos for all exercises. Do fill in the evaluation when you leave early :-)

"},{"location":"courses_workshops/intro_to_python/#coordinators","title":"Coordinators","text":"
  • ?
"},{"location":"courses_workshops/naiss_transfer/","title":"Transferring Files to/from HPC Clusters","text":"

In this 3-hour workshop you learn to transfer files to or from Swedish academic HPC clusters. We will cover graphical as well as terminal tools, and you will work highly interactively. At the end of the day, you should be comfortable transferring files between a local computer and a cluster, as well as across clusters, and choosing the right tool for your use cases.

The workshop is intended for beginner users with some Linux experience; see the course link below. You do not need to be a member of a NAISS project in order to join the workshop. A course project on one of the NAISS clusters will be available to participants.

"},{"location":"courses_workshops/naiss_transfer/#prerequisites","title":"Prerequisites","text":"
  • Be able to login to your cluster's remote desktop environment, using either a website or a local ThinLinc client
  • Be able to login using an SSH client
  • Be able to navigate the filesystem under Linux
    • Example Linux material: https://uppmax.github.io/uppmax_intro_day_1/sessions/use_terminal/ and exercises 1-3
"},{"location":"courses_workshops/naiss_transfer/#preliminary-schedule-overview","title":"Preliminary schedule overview","text":"
  • FileZilla
  • Log in with terminal and file transfer using rsync
  • File transfer using scp and sftp
"},{"location":"courses_workshops/naiss_transfer/#coming-course-instance","title":"Coming course instance","text":"
  • When: Fri 7 March, 2025, 9.00-12.00
  • Where: Online via Zoom

  • Registration

  • Course material: TBA

"},{"location":"courses_workshops/naiss_transfer/#coming-course-instances","title":"Coming course instances","text":"
  • Friday May 16th 9:00-12:00 (week 20)
  • Friday Sep 5th (week 36)
  • Friday 14 Nov (week 46)
"},{"location":"courses_workshops/uppmax_intro_course/","title":"Introduction to Linux and UPPMAX","text":""},{"location":"courses_workshops/uppmax_intro_course/#overview-and-schedule","title":"Overview and Schedule","text":"

UPPMAX application experts want to share their skills in a 3-day series of lectures. We will help you move from being a Linux novice to an UPPMAX expert. If you already have the fundamentals down, you are still sure to enjoy the tips and tricks in the later parts of the course. The lectures covering Linux and bash scripting are cluster-agnostic and may be attended by non-UPPMAX users as well. It is possible to sign up only for the lectures that are interesting to you.

When: February 10-12, 2025.

Where: online via Zoom.

Registration form

"},{"location":"courses_workshops/uppmax_intro_course/#schedule","title":"Schedule","text":"Monday, February 10 Tuesday, February 11 Wednesday, February 12 Morning Intro to Linux and UPPMAXRich\u00e8l Bilderbeek Linux IIDouglas Scofield Bash ScriptingDouglas Scofield Afternoon Intro to UPPMAXRich\u00e8l Bilderbeek Linux IIIDouglas Scofield Slurm at UPPMAXDiana Iusan

The lectures are scheduled 09:00 to 12:00 and 13:00 to 16:00 daily.

"},{"location":"courses_workshops/uppmax_intro_course/#startup-instructions-to-course-participants","title":"Startup instructions to course participants","text":"

Approximately two weeks before the course starts, you will receive a set of instructions for creating an account and joining the course project. It is important that you complete these steps well in advance of the course.

"},{"location":"databases/1000-genome_project/","title":"1000 genomes project","text":"

The 1000-genome project is an international collaboration to sequence the genomes of a large number of people. The complete archive is available from NCBI and EBI but downloading this massive quantity of next-gen data is time- and resource-consuming. UPPMAX now has a local copy of the sequencing and index files (BAM, BAI and BAS) as a shared resource.

The main archive is stored at /sw/data/KGP/central. Within this folder, \"low\" holds the primary dataset, with one folder per individual (e.g., \"HG00096\", \"NA11831\") holding the data files for each sequencing technology applied. In the main folder, \"high\" holds the high-coverage data for a subset of the individuals.
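
For example, to browse one individual's low-coverage data (the individual ID is from the examples above; the exact subfolder layout may differ):

ls /sw/data/KGP/central/low/HG00096\n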

One level up in the file system, /sw/data/KGP/regional holds sequence data for some individual countries outside the 1000-genome project. So far, very little data has been stored but this may be expanded.

Users interested in any of this data should request membership in the \"KGP\" group (via support@uppmax.uu.se). This requirement is not intended to restrict the resource in any way, but it makes it easier to inform interested users of possible changes. Considering the large storage space used, it is possible that the data would need to be reorganized or possibly even reduced in the future, depending of course on the perceived need for the resource by the members of the KGP group.

"},{"location":"databases/blast/","title":"Blast databases available locally","text":"

Many annotation and assembly-comparison pipelines involve Blast. Several Blast versions are available as modules, for example:

  • blast/2.12.0+, etc. : the Blast+ suites (blastp, tblastn, etc.), recommended
  • diamond/2.0.14 : the DIAMOND protein aligner, recommended for protein databases. See UPPMAX's DIAMOND database webpage for more information.
  • blast/2.2.26, etc. : 'legacy' Blast (blastall, megablast, etc)

Use module spider blast to see available versions. As for all bioinformatics tools at UPPMAX, module load bioinfo-tools is required before the blast modules are available.
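
For example, a minimal session that finds and loads a Blast+ module (the version is one of those listed above):

module load bioinfo-tools\nmodule spider blast\nmodule load blast/2.12.0+\n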

UPPMAX maintains local copies of many Blast databases, including many available at NCBI:

  • ftp://ftp.ncbi.nih.gov/blast/db/README
  • ftp://ftp.ncbi.nlm.nih.gov/blast/documents/blastdb.html
  • https://www.ncbi.nlm.nih.gov/books/NBK62345/
  • https://ncbiinsights.ncbi.nlm.nih.gov/2020/02/21/rrna-databases/
  • https://www.ncbi.nlm.nih.gov/sars-cov-2/
  • https://www.ncbi.nlm.nih.gov/refseq/refseq_select/
  • https://blast.ncbi.nlm.nih.gov/smartblast/smartBlast.cgi?CMD=Web&PAGE_TYPE=BlastDocs#searchSets

as well as several UniProt databases.

Note that:

  • The local UPPMAX copies are found at /sw/data/blast_databases
  • Doing module load blast_databases sets the environment variable BLASTDB to this directory; this is loaded as a prerequisite when loading any blast module
  • New versions are installed the first day of each month at 00.01, from local copies updated the 28th of the previous month beginning at 00.01
  • When new versions are installed, the directory containing the previous versions is renamed to blast_databases_old
  • blast_databases_old is deleted the second day of each month at 00.01
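
A quick way to verify the database location described above:

module load blast_databases\necho $BLASTDB\n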

These databases use the \"v5\" format, which includes rich taxonomic information with the sequences, and they only work with the Blast tools from module blast/2.8.0+ and later. Earlier module versions can still be used, but you will need to provide or build your own databases. NCBI stopped updating the older \"v4\" format databases in February 2020, and they have been deleted from UPPMAX. The final updates of these databases (as of this writing nearly two years old) are available from NCBI over FTP at ftp://ftp.ncbi.nlm.nih.gov/blast/db/v4.

Each NCBI-hosted database also includes a JSON file containing additional metadata for that particular database. These are found in /sw/data/blast_databases/ and are named databasename*.json. The exact name varies based on the format of the database. For example, the contents of the JSON file for the nr database can be seen by running

cat /sw/data/blast_databases/nr*.json\n

The Blast databases available at UPPMAX are:

| Name | Type | Source | Notes |
| --- | --- | --- | --- |
| 16S_ribosomal_RNA | nucleotide | NCBI | 16S ribosomal RNA (Bacteria and Archaea type strains) |
| 18S_fungal_sequences | nucleotide | NCBI | 18S ribosomal RNA sequences (SSU) from Fungi type and reference material (BioProject PRJNA39195) |
| 28S_fungal_sequences | nucleotide | NCBI | 28S ribosomal RNA sequences (LSU) from Fungi type and reference material (BioProject PRJNA51803) |
| Betacoronavirus | nucleotide | NCBI | Betacoronavirus nucleotide sequences |
| cdd_delta | protein | NCBI | Conserved domain database for use with delta-blast |
| env_nr | protein | NCBI | Protein sequences for metagenomes (EXCLUDED from nr) |
| env_nt | nucleotide | NCBI | Nucleotide sequences for metagenomes |
| human_genome | nucleotide | NCBI | Current RefSeq human genome assembly with various database masking |
| ITS_eukaryote_sequences | nucleotide | NCBI | Internal transcribed spacer region (ITS) for eukaryotic sequences |
| ITS_RefSeq_Fungi | nucleotide | NCBI | Internal transcribed spacer region (ITS) from Fungi type and reference material (BioProject PRJNA177353) |
| landmark | protein | NCBI | Proteomes of 27 model organisms. The landmark database includes complete proteomes from a few selected representative genomes spanning a wide taxonomic range; it is the main database used by the SmartBLAST services |
| LSU_eukaryote_rRNA | nucleotide | NCBI | Large subunit ribosomal RNA sequences for eukaryotic sequences |
| LSU_prokaryote_rRNA | nucleotide | NCBI | Large subunit ribosomal RNA sequences for prokaryotic sequences |
| mito | nucleotide | NCBI | NCBI Genomic Mitochondrial Reference Sequences |
| mouse_genome | nucleotide | NCBI | Current RefSeq mouse genome assembly with various database masking |
| nr | protein | NCBI | Non-redundant protein sequences from GenPept, Swissprot, PIR, PDF, PDB, and NCBI RefSeq |
| nt | nucleotide | NCBI | Partially non-redundant nucleotide sequences from all traditional divisions of GenBank, EMBL, and DDBJ |
| pataa | protein | NCBI | Patent protein sequences |
| patnt | nucleotide | NCBI | Patent nucleotide sequences. Both patent databases are directly from the USPTO, or from the EPO/JPO via EMBL/DDBJ |
| pdbaa | protein | NCBI | Sequences for the protein structure from the Protein Data Bank |
| pdbnt | nucleotide | NCBI | Sequences for the nucleotide structure from the Protein Data Bank. They are NOT the protein coding sequences for the corresponding pdbaa entries |
| ref_euk_rep_genomes | nucleotide | NCBI | RefSeq Representative Eukaryotic genomes (1000+ organisms) |
| ref_prok_rep_genomes | nucleotide | NCBI | RefSeq Representative Prokaryotic genomes (5700+ organisms) |
| ref_viroid_rep_genomes | nucleotide | NCBI | RefSeq Representative Viroid genomes (46 organisms) |
| ref_viruses_rep_genomes | nucleotide | NCBI | RefSeq Representative Virus genomes (9000+ organisms) |
| refseq_protein | protein | NCBI | NCBI protein reference sequences |
| refseq_rna | nucleotide | NCBI | NCBI transcript reference sequences |
| refseq_select_prot | protein | NCBI | NCBI RefSeq protein sequences from human, mouse, and prokaryotes, restricted to the RefSeq Select set of proteins. RefSeq Select includes one representative protein per protein-coding gene for human and mouse, and RefSeq proteins annotated on reference and representative genomes for prokaryotes |
| refseq_select_rna | nucleotide | NCBI | NCBI RefSeq transcript sequences from human and mouse, restricted to the RefSeq Select set with one representative transcript per protein-coding gene |
| SSU_eukaryote_rRNA | nucleotide | NCBI | Small subunit ribosomal RNA sequences for eukaryotic sequences |
| swissprot | protein | NCBI | Swiss-Prot sequence database (last major update) |
| tsa_nr | protein | NCBI | Protein sequences from the Transcriptome Shotgun Assembly. Its entries are EXCLUDED from the nr database |
| tsa_nt | nucleotide | NCBI | A database with earlier non-project-based Transcriptome Shotgun Assembly (TSA) entries. Project-based TSA entries are NOT included. Entries are EXCLUDED from the nt database |
| uniprot_sprot | protein | UniProt | Swiss-Prot high quality manually annotated and non-redundant protein sequence database |
| uniprot_trembl | protein | UniProt | TrEMBL high quality but unreviewed protein sequence database |
| uniprot_sptrembl | protein | | uniprot_sprot and uniprot_trembl combined |
| uniprot_all | protein | | alias for uniprot_sptrembl |
| uniprot_all.fasta | protein | | alias for uniprot_sptrembl |
| uniprot_sprot_varsplic | protein | UniProt | UniProt canonical and isoform sequences (see link) |
| uniprot_uniref50 | protein | UniProt | Clustered sets of 50%-similar protein sequences (see link) |
| uniprot_uniref90 | protein | UniProt | Clustered sets of 90%-similar protein sequences (see link) |
| uniprot_uniref100 | protein | UniProt | Clustered sets of identical protein sequences (see link) |
| UniVec | nucleotide | UniVec | Sequences commonly attached to cDNA/genomic DNA during the cloning process |
| UniVec_Core | nucleotide | UniVec | A subset of UniVec chosen to minimise false positives |

Additionally, taxdb.btd and taxdb.bti are downloaded, which provide additional taxonomy information for these databases. Local copies of the NCBI Taxonomy databases are also available; further details are available on a separate page.

For UniVec and UniVec_Core, Fasta-format files containing the vector sequences are also available with the given names (e.g., /sw/data/uppnex/blast_databases/UniVec), alongside the Blast-format databases built from the same Fasta files.

The exact times at which the databases were updated are provided by database.timestamp files located in the database directory.

Databases are available automatically after loading any blast module

When any of the blast modules is loaded, the BLASTDB environment variable is set to the location of the local database copies (/sw/data/uppnex/blast_databases). The various Blast tools can use this variable to find the locations of databases, so that only the name needs to be specified.

module load bioinfo-tools blast/2.7.1+\nblastp -db nr -query input.fasta\n

After loading the blast/2.7.1+ module, specifying blastp -db nr results in blastp searching the local copy of nr, because the BLASTDB environment variable is set when the module is loaded. Similarly, each of these would result in searching the local copy of the given database:

blastp -db pdbaa ...\nblastp -db uniprot_sprot ...\nblastp -db uniprot_uniref90 ...\nblastn -db nt ...\nblastn -db refseq_genomic ...\n

WGS and SRA sequence databases are not included

The NCBI Whole-Genome Shotgun (WGS) archive is not available locally. NCBI provides special versions of Blast and other tools that can be used to search the remote versions of WGS and the Sequence Read Archive (SRA).

These special Blast versions and other tools are part of NCBI's SRA Tools, which is available at UPPMAX as the sratools module. We have also included auxiliary NCBI scripts in the sratools module to convert taxonomic IDs to WGS and SRA identifiers.

Note that NCBI's TSA database is available at UPPMAX, just use the database name tsa_nr or tsa_nt.

"},{"location":"databases/diamond/","title":"DIAMOND protein alignment databases","text":"

The DIAMOND protein aligner is a recent tool offering much faster alignment of protein sequences against reference databases (100\u00d7 to 1000\u00d7 faster than Blast). On UPPMAX, DIAMOND is available by loading the diamond module, the most recent installed version of which, as of this writing, is diamond/2.0.14.

As for BLAST databases, UPPMAX provides several pre-built databases suitable for direct use with the --db flag to diamond, and also runs diamond prepdb on each of its downloaded BLAST protein databases whenever they are installed. The BLAST databases are updated according to the schedule given on their webpage. The DIAMOND-format NCBI protein databases are updated once a month.

For each of the databases listed below, the method of versioning is indicated. To determine the version at UPPMAX, list the directory given below after removing the database name from the end of the path; latest is a symbolic link that points to a directory named after the version of the most recent update. Old database versions are removed after updates, so please use latest rather than addressing a database version directly.
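
For example, to see which dated version latest currently points to for the DIAMOND-format NCBI databases (the path is from the tables below, with the database name removed):

ls -l /sw/data/diamond_databases/Blast/latest\n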

Each of the database locations below is also available in the indicated environment variable set when any version of the diamond module is loaded. These are simple to use, for example to search nr:

diamond --db $DIAMOND_NR ...\n

NCBI BLAST Protein Databases

Whenever the BLAST databases are updated and installed, diamond prepdb is run on each of the protein-format databases so that they can be searched directly by diamond. See the BLAST databases webpage for a description of these.

To search any of them using diamond, load the blast_databases/latest module. This defines the environment variable BLASTDB, which contains the directory holding these databases. Once this module is loaded, you can run diamond on any of the protein databases. For example:

diamond --db $BLASTDB/nr ...\ndiamond --db $BLASTDB/cdd_delta ...\ndiamond --db $BLASTDB/swissprot ...\ndiamond --db $BLASTDB/pdbaa ...\n

According to DIAMOND's developer, these are faster to load than DIAMOND's own .dmnd-format databases. So, you may want to load the blast_databases/latest data module and use --db $BLASTDB/nr for your NCBI nr searches, for example, instead of --db $DIAMOND_NR.

Diamond-format NCBI Protein Databases

Downloaded from ftp://ftp.ncbi.nlm.nih.gov/blast/db/FASTA. These are updated frequently at NCBI, so they are versioned here by the monthly download date. There is no longer a separate FASTA version of env_nr, so its Blast database is downloaded from ftp://ftp.ncbi.nlm.nih.gov/blast/db and FASTA sequences are extracted using blastdbcmd -entry all from module blast/2.12.0+.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| nr | DIAMOND_NR | /sw/data/diamond_databases/Blast/latest/nr |
| env_nr | DIAMOND_ENV_NR | /sw/data/diamond_databases/Blast/latest/env_nr |
| swissprot | DIAMOND_SWISSPROT | /sw/data/diamond_databases/Blast/latest/swissprot |
| pdbaa | DIAMOND_PDBAA | /sw/data/diamond_databases/Blast/latest/pdbaa |

NCBI RefSeq Proteins

RefSeq protein databases are downloaded from ftp://ftp.ncbi.nlm.nih.gov/refseq/release/complete/, with an update occurring if there is a new release as indicated by the contents of ftp://ftp.ncbi.nlm.nih.gov/refseq/release/RELEASE_NUMBER.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| complete.nonredundant_protein.protein | DIAMOND_REFSEQ_NONREDUNDANT | /sw/data/diamond_databases/RefSeq/latest/complete.nonredundant_protein.protein |
| complete.protein | DIAMOND_REFSEQ | /sw/data/diamond_databases/RefSeq/latest/complete.protein |

UniRef90

The UniRef90 protein database is downloaded as Fasta from its UK mirror at ftp://ftp.expasy.org/databases/uniprot/current_release/uniref/uniref90/, with an update occurring if there is a new version as indicated by the <version> tag in the XML description available at ftp://ftp.expasy.org/databases/uniprot/current_release/uniref/uniref90/RELEASE.metalink.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| uniref90 | DIAMOND_UNIREF90 | /sw/data/diamond_databases/UniRef90/latest/uniref90 |

UniProt Reference Proteomes

The UniProt Reference Proteomes protein database is downloaded as Fasta from its UK mirror at ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/reference_proteomes, with an update occurring if there is a new version as indicated by the <version> tag in the XML description available at ftp://ftp.expasy.org/databases/uniprot/current_release/knowledgebase/reference_proteomes/RELEASE.metalink. If there is a new release, then the file Reference_Proteomes_RELEASE.tar.gz is downloaded, with RELEASE replaced by the release number. The reference_proteomes.dmnd database is created from this file using the protocol described after the table.

| Database | Environment variable for diamond --db | UPPMAX path |
| --- | --- | --- |
| UniProt Reference Proteomes | DIAMOND_REFERENCE_PROTEOMES | /sw/data/diamond_databases/reference_proteomes/latest/reference_proteomes |

The reference_proteomes.dmnd database is created using the following protocol for the BlobToolKit. It uses UPPMAX's most recently downloaded NCBI taxonomy database for its taxonomic metadata.

module load bioinfo-tools\nmodule load diamond/2.0.14\nmodule load ncbi_taxonomy/latest\n
"},{"location":"databases/diamond/#after-downloading","title":"after downloading","text":"
tar xf Reference_Proteomes_RELEASE.tar.gz\ntouch reference_proteomes.fasta.gz\nfind . -mindepth 2 | grep \"fasta.gz\" | grep -v 'DNA' | grep -v 'additional' | xargs cat >> reference_proteomes.fasta.gz\nprintf \"accession\\taccession.version\\ttaxid\\tgi\\n\" > reference_proteomes.taxid_map\nzcat */*/*.idmapping.gz | grep \"NCBI_TaxID\" | awk '{print $1 \"\\t\" $1 \"\\t\" $3 \"\\t\" 0}' >> reference_proteomes.taxid_map\ndiamond makedb --db reference_proteomes.dmnd --in reference_proteomes.fasta.gz --threads 10 --taxonmap reference_proteomes.taxid_map --taxonnames $NCBI_TAXONOMY_ROOT/names.dmp --taxonnodes $NCBI_TAXONOMY_ROOT/nodes.dmp\n
"},{"location":"databases/ncbi/","title":"NCBI taxonomy databases","text":"

UPPMAX maintains local copies of the full set of NCBI Taxonomy databases. Note that:

  • The local copies are found at /sw/data/ncbi_taxonomy/latest
  • The data module ncbi_taxonomy/latest defines the environment variable NCBI_TAXONOMY_ROOT to this location. We recommend loading this module and using this environment variable to access these data.
  • This directory also contains the subdirectories new_taxdump, accession2taxid and biocollections, which hold those databases; see the tables below for their contents
  • latest is a symbolic link to a directory named from the date of the most recent update
  • There is also a subdirectory download containing the files as downloaded from NCBI
  • The installation of new versions begins Sunday of each week at 00.10. The update may take several minutes up to an hour, depending on network speeds.
  • When new versions are successfully installed, the latest/ symbolic link is updated to point to the new dated directory
  • The previous versions of the taxonomy databases are removed when the new versions have completed installation

See the links for each database for specifics on file format and contents. Many tools know how to make use of these databases; follow each tool's specific instructions. The files can be found in the indicated directories.
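
For example, a minimal session using the data module and environment variable described above:

module load bioinfo-tools\nmodule load ncbi_taxonomy/latest\nls $NCBI_TAXONOMY_ROOT\n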

The databases available within /sw/data/ncbi_taxonomy/latest are below. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| taxdump | NCBI | NCBI taxonomic database, in multiple .dmp files (see taxdump_readme.txt or link) |
| taxcat | NCBI | NCBI taxonomic categories, in categories.dmp (see taxcat_readme.txt or link) |
| taxdump_readme.txt | NCBI | NCBI taxdump file description |
| taxcat_readme.txt | NCBI | NCBI taxcat file description |
| gi_taxid_nucl.dmp | NCBI | Mappings of nucleotide GI to taxid (DEPRECATED) |
| gi_taxid_prot.dmp | NCBI | Mappings of protein GI to taxid (DEPRECATED) |

The databases available within /sw/data/ncbi_taxonomy/latest/new_taxdump are below. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| new_taxdump | NCBI | NCBI new-format taxonomic database, in multiple .dmp files (see this taxdump_readme.txt or link) |
| taxdump_readme.txt | NCBI | NCBI new-format taxonomic database file description |

The databases available within /sw/data/ncbi_taxonomy/latest/accession2taxid are below. The dead_ files contain accession-to-TaxID mappings for dead (suppressed or withdrawn) sequence records. For more on each, see the links.

| Name | Source | Notes |
| --- | --- | --- |
| nucl_wgs.accession2taxid | NCBI | TaxID mapping for nucleotide records of type WGS or TSA |
| nucl_gb.accession2taxid | NCBI | TaxID mapping for nucleotide records not of the above types |
| prot.accession2taxid | NCBI | TaxID mapping for protein records |
| pdb.accession2taxid | NCBI | TaxID mapping for PDB protein records |
| dead_nucl.accession2taxid | NCBI | TaxID mapping for dead nucleotide records |
| dead_prot.accession2taxid | NCBI | TaxID mapping for dead protein records |
| dead_wgs.accession2taxid | NCBI | TaxID mapping for dead WGS or TSA records |

The biocollections databases contain collection location information. coll_dump.txt is located within the /sw/data/ncbi_taxonomy/latest directory. Those marked biocollections are located within the /sw/data/ncbi_taxonomy/latest/biocollections directory.

| Name | Source | Notes |
| --- | --- | --- |
| coll_dump.txt | NCBI | |
| Collection_codes.txt | NCBI | biocollections |
| Institution_codes.txt | NCBI | biocollections |
| Unique_institution_codes.txt | NCBI | biocollections |
"},{"location":"databases/other_local/","title":"Other bioinformatics-oriented local data resources","text":"

Haplotype Reference Consortium

The Haplotype Reference Consortium VCF database is a large reference panel of human haplotypes, produced by combining sequencing data from multiple cohorts. Version r1.1 is installed on all systems as data module HaplotypeReferenceConsortium/r1.1.

GnomAD: Genome Aggregation Database

The Genome Aggregation Database (gnomAD) VCF database is downloaded and located in /sw/data/gnomad_data/vcf/{exomes, genomes}.

ExAC: Exome Aggregation Consortium

The ExAC Exome Aggregation Consortium database releases 0.1, 0.2, 0.3 and 0.3.1 are downloaded in their entirety and are available at /sw/data/ExAC/release{0.1,0.2,0.3,0.3.1}.

Pfam

The Pfam database versions 2011, 28.0, 31.0 and 35.0 are downloaded in their entirety and available via the data modules Pfam/{2011,28.0,31.0,35.0}, which each define the environment variable PFAM_ROOT to the location of the Pfam downloads. See the appropriate module help for further information. In particular, the family-specific trees are available in $PFAM_ROOT/trees. The given directory can be used for the -dir argument to the pfam_scan.pl script provided by the pfam_scan modules, which each load the appropriate Pfam data module. Module version pfam_scan/1.5 is for Pfam/28.0, and module version pfam_scan/1.6 is for Pfam/31.0. This latter module might also work with Pfam/35.0.

pfam_scan.pl -dir $PFAM_ROOT ...\n

The pfam_scan.pl script is designed to work with the Pfam database.

dbCAN

The dbCAN 4.0 database for automated carbohydrate-active enzyme annotation is available in the directory /sw/data/dbCAN/4.0 on UPPMAX servers. The database is formatted for use with the hmmer/3.1b1-{gcc,intel} modules. For more information see /sw/data/dbCAN/4.0/readme.txt or the remote version.

The local path to the script for post-processing hmmscan --domtblout output is /sw/data/dbCAN/4.0/hmmscan-parser.sh. The CAZyDB trees have also been unpacked and are available in /sw/data/dbCAN/4.0/CAZyDB-phylogeny.

Variant Effect Predictor cache files

Local caches of all database files available for Ensembl Variant Effect Predictor 87, 89 and 91 are available in the directories /sw/data/vep/{87,89,91}. When module version vep/89 or vep/91 is loaded, the environment variable VEP_CACHE is set to the directory for the appropriate version. Local caches for versions 82, 84 and 86 exist only for homo_sapiens. To use the cached databases, run the script with the --cache option to indicate the use of a locally-cached database, and the --dir option to specify where this is:

vep --cache --dir $VEP_CACHE  ...\n

If you are using vep/89, use:

variant_effect_predictor.pl --cache --dir $VEP_CACHE  ...\n

All plugins are also available. For more script options, see its online help page.

CDD - Position-Specific Scoring Matrices for CD-Search

The CDD database versions 3.14 and 3.16 are downloaded in their entirety and are available at /sw/data/cdd/{3.14,3.16}. These directories contain collections of position-specific scoring matrices (PSSMs) that have been created for the CD-Search service.

The PSSMs are meant to be used for compiling RPS-BLAST search databases, which can be used with the standalone RPS-BLAST programs (rpsblast and rpsblastn). These programs, as well as the makeprofiledb application needed to convert files in this directory, are part of the BLAST+ executables (available on UPPMAX as part of bioinfo-tools, e.g., module blast/2.2.31+). The makeprofiledb application is described at http://www.ncbi.nlm.nih.gov/books/NBK1763.

More information is available in the CDD README, either via FTP or its local copy /sw/data/cdd/README.

iGenomes - Collection of reference sequences and annotation files

A local copy of Illumina's iGenomes collection of commonly analyzed organisms is available at /sw/data/igenomes. In addition to the annotations provided by the collection, Bismark and STAR indexes have been added.

UK Biobank institutional data set (GENETICS)

The UKBB data set is available for eligible projects on Bianca, the SNIC-SENS system for sensitive research. If you believe you are eligible, contact Professor Tove Fall to gain access.

"},{"location":"databases/overview/","title":"Overview of databases","text":"

Many commonly used data sources are stored locally at UPPMAX. This page provides an index to pages where they are described in more detail. Available databases:

  • BLAST databases available locally
  • NCBI taxonomy databases
  • DIAMOND protein alignment databases
  • Reference genomes
  • 1000-genome project
  • Simons Genome Diversity Project datasets
  • Other bioinformatics-oriented local data resources

In order for you to access Swegen or 1000-genomes, you must first send an email to datacentre@scilifelab.se and ask for access. When they approve your request, they will contact UPPMAX and we will grant you access.

","tags":["database","databases","overview","list"]},{"location":"databases/reference_genomes/","title":"Reference genomes","text":"

NOTE: The Illumina iGenomes are also available at UPPMAX, with additional indices built for Bismark and STAR. The scripts used to build the additional indices are available at the UPPMAX/bio-data GitHub repository.

Many next-generation sequencing applications involve alignment of the sequence reads to a reference genome. We store reference sequences in a directory that is accessible to all users of the system. The table below shows all currently available genomes.

| Reference genome | Assembly version |
| --- | --- |
| Homo sapiens | Feb. 2009 (GRCh37/hg19) |
| Pan troglodytes | Mar. 2006 (CGSC2.1/PanTro2) |
| Macaca mulatta | Jan. 2006 (RheMac2) |
| Sus scrofa | Apr. 2009 (Sscrofa9) |
| Canis familiaris | Sep. 2011 (CanFam3) |
| Mus musculus | July 2007 (NCBIM37/mm9), Jan. 2012 (GRCm38) |
| Gallus gallus | May 2006 (WASHUC2/galGal3) |
| Taeniopygia guttata | Mar. 2010 (TaeGut3.2.4) |
| Saccharomyces cerevisiae | Mar. 2010 (ScereEF2) |
| Equus caballus | Sep. 2007 (EquCab2) |
| Pichia stipitis | Picst3 |
| Rattus norvegicus | Nov. 2004 (RGSC3.4.61) |
| Schizosaccharomyces pombe | 20090701 |

Directory structure

The data files are located at /sw/data/reference and the directory structure is e.g.: Homo_sapiens/GRCh37.
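
For example, to list the subdirectories for the human reference genome:

ls /sw/data/reference/Homo_sapiens/GRCh37\n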

Each directory contains several subdirectories, explained below:

dna_ftp.ensembl.org_ contains the original data files from the ENSEMBL ftp server, and should not be modified.

chromosomes contains fasta files for individual chromosomes.

chromosomes_rm contains the same files, masked with RepeatMasker.

concat contains most of the fasta files in \"chromosomes\" concatenated into a single fasta file. The exceptions are alternate contig files and DNA not mapped to any chromosome.

concat_rm contains most of the fasta files in \"chromosomes_rm\" concatenated into a single fasta file. The exceptions are alternate contig files and DNA not mapped to any chromosome.

program_files contains index files and metadata for software packages used to work with reference genomes, e.g. SAMtools and aligners such as Bowtie, BWA.

Requests for additional reference genomes or software data/index files should be directed to UPPMAX support.

"},{"location":"databases/simons_genome/","title":"Simons Genome Diversity Project datasets","text":"

The Simons Foundation's Genome Diversity Project (SGDP) datasets are now available on UPPMAX. These comprise deep human genome sequence data, sampled to capture as much diversity as possible:

sgdp geographical distribution

There are currently approximately 14 TB of data, in the form of CRAM files with associated indices and summaries of the BAM files from which the CRAM files were derived.

Our current SGDP data are those aligned to human reference genome GRCh38DH found at ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/simons_diversity_data/. The local UPPMAX directory for these data is /sw/data/SGDP/. The command used to collect the data was

echo \"mirror data\" | lftp ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/data_collections/simons_diversity_data\n

As a result, the local UPPMAX archive is found at /sw/data/SGDP/data/. Within this directory are subdirectories for each of the populations included in the full dataset, with individual samples found within each population directory. For example,

rackham1: /sw/data/SGDP $ ls -l data/Greek\ntotal 8\ndrwxr-s--- 3 douglas kgp 4096 Apr 29 14:03 SAMEA3302732\ndrwxr-s--- 3 douglas kgp 4096 Apr 29 14:03 SAMEA3302763\n

and one of these sample directories contains

rackham1: /sw/data/SGDP $ ls -l data/Greek/SAMEA3302732/alignment/\ntotal 34529204\n-rw-r----- 1 douglas kgp         635 Nov 30  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.bam.bas\n-rw-r----- 1 douglas kgp 35355769475 Nov 30  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.cram\n-rw-r----- 1 douglas kgp     2079029 Dec  1  2020 SAMEA3302732.alt_bwamem_GRCh38DH.20200922.Greek.simons.cram.crai\n

To access this data, please request membership in the kgp group by emailing support@uppmax.uu.se. As for the 1000 Genomes Project, this is not to restrict access in any way, but rather to make it easier to inform UPPMAX users using the datasets of any relevant changes. Because the local copies of these datasets are hosted on UPPMAX systems, access is restricted to UPPMAX users; non-UPPMAX users will need to follow the procedures described on the SGDP website to download their own copies of the datasets.

"},{"location":"databases/swegen/","title":"Access to Swegen","text":"

In order for you to access Swegen (or 1000 genomes) you must first send an email to datacentre@scilifelab.se and ask for access.

When they approve you, they will contact UPPMAX and we will grant access to Swegen.

"},{"location":"getting_started/bianca_usage_prerequisites/","title":"Prerequisites for using Bianca","text":"

To be allowed to log in to Bianca, one needs all of these:

  • An active research project
  • An UPPMAX account
  • An UPPMAX password

These prerequisites are discussed in detail below.

"},{"location":"getting_started/bianca_usage_prerequisites/#an-active-research-project","title":"An active research project","text":"

One prerequisite for using Bianca is that you need to be a member of an active SNIC SENS or SIMPLER research project (these are called sens[number] or simp[number], where [number] represents a number, for example sens123456 or simp123456).

Forgot your Bianca projects?

One easy way to see your Bianca projects is to use the Bianca remote desktop login screen at https://bianca.uppmax.uu.se/.

SUPR (the 'Swedish User and Project Repository') is the website that allows one to request access to Bianca and to get an overview of the requested resources.

What does the SUPR website look like?

First SUPR page

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

After logging in, the SUPR website will show all projects you are a member of, under the 'Projects' tab.

What does the 'Projects' tab of the SUPR website look like?

Example overview of SUPR projects

To see if a project has access to Bianca, click on the project and scroll to the 'Resources' section. In the 'Compute' subsection, there is a table. Under 'Resource' it should state 'Bianca @ UPPMAX'.

What does the 'Resources' page of an example project look like?

The 'Resources' page of an example project.

Note that the 'Accounts' tab can be useful to verify your username.

How does the 'Accounts' tab help me find my username?

An example of a SUPR 'Accounts' tab. The example user has username sven-sens2023598, which means his/her UPPMAX username is sven

You can become a member of an active SNIC SENS project by:

  • requesting membership of an existing project in SUPR
  • creating a project; see the UPPMAX page on how to submit a project application here
"},{"location":"getting_started/bianca_usage_prerequisites/#an-uppmax-user-account","title":"An UPPMAX user account","text":"

Another prerequisite for using Bianca is that you must have a personal UPPMAX user account.

"},{"location":"getting_started/bianca_usage_prerequisites/#an-uppmax-password","title":"An UPPMAX password","text":"

Another prerequisite for using Bianca is that you need to know your UPPMAX password. If you change it, it may take up to an hour before the change is reflected on Bianca.

For advice on handling sensitive personal data correctly on Bianca, see our FAQ page.

"},{"location":"getting_started/change_uppmax_password/","title":"Change your UPPMAX password","text":"Prefer a video?

See the YouTube video 'How to reset your UPPMAX password' at 1:53

If you know your UPPMAX password, here is how to change it.

Forgot your UPPMAX password?

Go to How to reset your UPPMAX password.

","tags":["UPPMAX","password","change","edit","modify"]},{"location":"getting_started/change_uppmax_password/#procedure","title":"Procedure","text":"","tags":["UPPMAX","password","change","edit","modify"]},{"location":"getting_started/change_uppmax_password/#1-log-in-to-your-favorite-uppmax-cluster","title":"1. Log in to your favorite UPPMAX cluster","text":"

See How to login to an UPPMAX cluster.

","tags":["UPPMAX","password","change","edit","modify"]},{"location":"getting_started/change_uppmax_password/#2-open-a-terminal","title":"2. Open a terminal","text":"

When logged in to an UPPMAX cluster, open a terminal. If you've logged in via SSH, you are already in a terminal :-)

","tags":["UPPMAX","password","change","edit","modify"]},{"location":"getting_started/change_uppmax_password/#3-set-your-own-password","title":"3. Set your own password","text":"

In that terminal, type:

passwd\n

Now you will be asked to enter your old password and then set a new one!

Your new password will work immediately!

","tags":["UPPMAX","password","change","edit","modify"]},{"location":"getting_started/first_job/","title":"Run your first job","text":"

This page guides you through a possible workflow

  • This is an example that gives you a quick start on the steps that may be required for you to do your work.
  • There are links to topics on the way, but you should be able to follow the steps anyway.
  • managing directories
  • transferring files
  • loading modules
  • writing a batch script
  • viewing your CPU hours and disk usage
"},{"location":"getting_started/first_job/#transferring-some-files","title":"Transferring some files","text":""},{"location":"getting_started/first_job/#graphical-file-manager","title":"Graphical file manager","text":"Want more detailed information of file transfer to/from Rackham using a graphical tool?

More detailed information of file transfer to/from Rackham using a graphical tool can be found here

  • This is good if you want to move many files between host and local and cannot use wildcards.
One such graphical tool is FileZilla. The choice of tool depends on your operating system:

  • Linux: FileZilla
    • For copying files with sftp (secure file transfer protocol) between your client computer (where you are) and the cluster, FileZilla can be the choice.
    • https://filezilla-project.org/download.php?type=client
  • Mac: Cyberduck
  • Windows: WinSCP, Cyberduck or FileZilla
    • For copying files between your client computer (where you are) and the cluster, WinSCP can also be the choice.
    • https://winscp.net/eng/download.php
Type-along

TODO

"},{"location":"getting_started/first_job/#using-the-compute-nodes","title":"Using the compute nodes","text":""},{"location":"getting_started/get_inside_sunet/","title":"Get inside the university networks","text":"

One cannot connect to all UPPMAX clusters from everywhere around the world. Instead, one needs to get inside the university networks first. This page describes how to get inside the university networks, or, to use more precise language, how to obtain a SUNET Internet Protocol ('IP') address.

How do I know if I am inside the university networks?

Go to https://bianca.uppmax.uu.se/.

  • If nothing happens, you are outside of the university networks

A user that is outside of the university network sees nothing.

  • If you see a login screen, you are inside of the university networks

A user that is inside of the university network sees a login screen.

There are three ways to do this:

  • Physically move inside SUNET
  • Use a VPN (a 'virtual private network')
  • Use an HPC cluster within SUNET

Each of these three ways is described below.

flowchart TD\n\n    subgraph sub_outside[IP outside SUNET]\n      outside(Physically outside SUNET)\n    end\n\n    subgraph sub_inside[IP inside SUNET]\n      physically_inside(Physically inside SUNET)\n      inside_using_vpn(Inside SUNET using VPN)\n      inside_using_rackham(Inside SUNET using Rackham)\n    end\n\n    %% Outside SUNET\n    outside-->|Move physically|physically_inside\n    outside-->|Use a VPN|inside_using_vpn\n    outside-->|Login to Rackham|inside_using_rackham\n\n    %% Inside SUNET\n    physically_inside-.->inside_using_rackham\n    physically_inside-.->inside_using_vpn
","tags":["SUNET","University network","University networks","Get inside","edoroam"]},{"location":"getting_started/get_inside_sunet/#physically-move-inside-sunet","title":"Physically move inside SUNET","text":"

To connect to all UPPMAX clusters, one must be inside SUNET.

All Swedish university buildings are within SUNET. Hence, working from a University building is a non-technical solution to get direct access to Bianca.

","tags":["SUNET","University network","University networks","Get inside","edoroam"]},{"location":"getting_started/get_inside_sunet/#use-a-virtual-private-network","title":"Use a virtual private network","text":"Want a video to see how to install the UU VPN?
  • Install VPN client for Ubuntu and Uppsala university

To connect to all UPPMAX clusters, one must be inside SUNET.

A virtual private network (VPN) allows one to access all UPPMAX clusters indirectly: your computer connects to the VPN within SUNET, where that VPN accesses your favorite UPPMAX cluster.

To setup a VPN, see the UPPMAX documentation on how to setup a VPN.

Want a video to see how the UU VPN is used?
  • Use the UU VPN with 2FA
  • Use the UU VPN (yet without 2FA) to access the Bianca remote desktop website
","tags":["SUNET","University network","University networks","Get inside","edoroam"]},{"location":"getting_started/get_inside_sunet/#use-an-hpc-cluster-within-sunet","title":"Use an HPC cluster within SUNET","text":"

To connect to all UPPMAX clusters, one must be inside SUNET.

An HPC cluster within SUNET (for example, Rackham) allows one to access all other clusters: your computer connects to the HPC cluster within SUNET, after which one accesses all other clusters.
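
A minimal sketch, assuming Rackham's standard SSH hostname and replacing username with your UPPMAX username:

ssh username@rackham.uppmax.uu.se\n# from Rackham, ssh onwards to your target cluster\n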

However, when using this method, one can only use the console environments (i.e. no remote desktop environment).

","tags":["SUNET","University network","University networks","Get inside","edoroam"]},{"location":"getting_started/get_started/","title":"Get started here","text":"

In order to use UPPMAX resources, you need to be a member of a project and have a user account.

"},{"location":"getting_started/get_started/#pis","title":"PIs","text":"

Do you or members of your research group need compute and storage resources on an HPC cluster or an Infrastructure-as-a-Service cloud? Learn how to apply for a project by following the link below:

  • Project apply

Are you interested in other services, e.g. large volume data storage? Let us know by contacting UPPMAX Support!

"},{"location":"getting_started/get_started/#users","title":"Users","text":"

Once you or someone in your group or collaboration has a project, you must apply for a user account by following the link below.

  • User account

Have an account already? Then check out these basic user guides:

  • Bianca first steps
  • Rackham first steps
  • Snowy first steps
"},{"location":"getting_started/get_started/#students","title":"Students","text":"

Are you taking a university course that uses UPPMAX and need help? Ask your instructor! If they can't help, contact us through IT Support.

"},{"location":"getting_started/get_started/#getting-started-first-login-to-uppmax","title":"Getting started: First login to UPPMAX","text":"

See Log in to an UPPMAX cluster.

"},{"location":"getting_started/get_started/#changing-your-password","title":"Changing your password","text":"

See How to change your UPPMAX password

"},{"location":"getting_started/get_started/#copying-files-fromto-your-uppmax-account","title":"Copying files from/to your UPPMAX account","text":"

See How to transfer files from/to your UPPMAX account

"},{"location":"getting_started/get_started/#where-are-my-files-or-what-are-the-different-file-systems","title":"Where are my files? Or, what are the different file systems?","text":"

See files on UPPMAX

"},{"location":"getting_started/get_started/#modules","title":"Modules","text":"

In order to run installed programs, one uses the module system.
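
A minimal sketch of a module session (the module name is an example):

module avail\nmodule load bioinfo-tools\nmodule list\n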

"},{"location":"getting_started/get_started/#how-to-run-jobs","title":"How to run jobs","text":"

All jobs should be run using the job scheduler.

"},{"location":"getting_started/get_started/#uppmax-homepage","title":"UPPMAX homepage","text":"

Please check our homepage regularly for information, news and announcements. We will announce maintenance stops and down time there.

  • https://www.uu.se/en/centre/uppmax
  • https://www.uppmax.uu.se
"},{"location":"getting_started/get_uppmax_2fa/","title":"Setting up two factor authentication for UPPMAX","text":"

Two factor authentication (abbreviated to '2FA') increases the security of your UPPMAX account and is mandatory in multiple contexts.

Why is this important?

See Why is 2FA important?

This page describes how to set this up.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#procedure","title":"Procedure","text":"Prefer a video?

Watch the YouTube video 'Get your UPPMAX 2FA'

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#1-install-a-2fa-app","title":"1. Install a 2FA app","text":"

Install an app to use for 2FA.

Which app do you recommend?

Any app that works for you.

Search for '2FA' or 'OTP' (short for 'one time password').

Wikipedia maintains a list of 2FA apps here.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#2-go-to-httpssuprintegrationuppmaxuusebootstrapotp","title":"2. Go to https://suprintegration.uppmax.uu.se/bootstrapotp/","text":"

In your web browser, go to https://suprintegration.uppmax.uu.se/bootstrapotp/.

What does this look like?

Here is what https://suprintegration.uppmax.uu.se/bootstrapotp/ looks like:

This will take you to the UU page to request a second factor for your UPPMAX account.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#3-click-on-continue","title":"3. Click on 'Continue'","text":"

At this page, click on 'Continue' to be sent to a 'Login to SUPR' page.

Click on 'Continue' to be sent to a 'Login to SUPR' page.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#4-log-in-to-supr","title":"4. Log in to SUPR","text":"

At the 'Login to SUPR' page, log in, in any way that works for you.

What does this look like?

In case you are not logged in already, log in to SUPR.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#5-press-the-button-prove-my-identity-to-uppmax","title":"5. Press the button 'Prove My Identity to UPPMAX'","text":"

Acknowledge to SUPR that they may tell UPPMAX who you are, by pressing the button \"Prove My Identity to UPPMAX\" on the page.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#6-scan-the-qr-code-with-your-2fa-app","title":"6. Scan the QR-code with your 2FA app","text":"

Scan the QR-code with your 2FA app.

What does that look like?

More details here.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#7-enter-the-code-on-the-webpage","title":"7. Enter the code on the webpage","text":"

Your application will show you a code, enter this code on the same webpage.

More details here.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#8-see-acknowledgement-that-the-new-two-factor-has-been-registered","title":"8. See acknowledgement that the new two factor has been registered","text":"

You should see an acknowledgement that the new two factor has been registered.

What does that look like?

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#9-wait-for-a-confirmation-email","title":"9. Wait for a confirmation email","text":"

After this procedure, it takes around 15 minutes before you can use the 2FA to log in.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#faq","title":"FAQ","text":"How does the use of a 2FA app looks like?

UPPMAX 2FA set up for a fictional UPPMAX user called sven

How do I know I used my new 2FA too early?

Simple answer: when you've used your new 2FA before having received an email.

Another way to find out: try to use your new 2FA. You will get an 'Authentication failed' error when your new 2FA is not active yet.

How long does it take before my 2FA is active?

This is a matter of minutes.

It takes a little while before your newly registered factor is usable, but this should be a matter of minutes, not days.

Will I get an email when my 2FA is active?

No.

There is no extra mail sent to let you know that the newly registered factor is usable, just the confirmation mail that mentions that it will be activated soon.

","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa/#troubleshooting","title":"Troubleshooting","text":"

Some of the common problems we've seen include

  • Not having an account at UPPMAX. This is required to get the second factor for your account.
  • Using a device that has its time set differently from our systems. There are services on the internet (e.g. https://time.is/) you can visit from the device you manage the code on that will show you if your device settings are problematic.
  • Noting the code given at first and trying to use that every time you are asked for a code. The code to give changes every thirty seconds; you should give whatever code is shown at the time.
  • Expecting something else to be sent to you. You register the new second factor as part of the process. A confirmation mail is sent as well, but this is mostly to let you know in case your account details in SUPR have gone astray and someone else has registered a second factor for your account.
","tags":["2FA","MFA","UPPMAX"]},{"location":"getting_started/get_uppmax_2fa_qr/","title":"Setting up the QR code for two factor authentication for UPPMAX","text":"

Part of setting up two factor authentication for UPPMAX is to get a QR code.

You need to scan this QR code to add your account to your app. Most apps call this \"Add account\" or similar, and will offer an option to scan a QR code using the smartphone camera or to select an area of the screen where the code is.

Note that this must often be done from within the app for two factor authentication.

If you see a string similar to

otpauth://totp/username@UPPMAX?secret=SOMETEXT&issuer=UPPMAX\n

it didn't work and you probably need to do something different (such as starting the app and selecting scan from within).

Once you've scanned the code, you are often allowed to change the name the software will use for the account before you add it. You can change the name if you want - changing the name does not affect the codes generated. Finish adding the account to the software.

","tags":["2FA","MFA","UPPMAX","QR"]},{"location":"getting_started/get_uppmax_2fa_qr_code/","title":"Setting up the QR code for two factor authentication for UPPMAX","text":"

Once you have added the new account, you should get one-time codes for it when you have it selected. To finish the registration at UPPMAX, you need to enter the code that is displayed in the field where it says \"Code:\" and submit. The codes will change over time; don't worry about this, you just need to use whatever code is current.

Once you have entered and submitted the current code, you should see a final page:

When you see that page, it will take a little while and the token will be activated (you should also receive an e-mail about the new token).

","tags":["2FA","MFA","UPPMAX","QR","code"]},{"location":"getting_started/join_existing_project/","title":"Join an existing project","text":"

To use an UPPMAX cluster, one needs to apply to a project. This page describes how to join an existing project.

"},{"location":"getting_started/join_existing_project/#procedure","title":"Procedure","text":""},{"location":"getting_started/join_existing_project/#1-go-to-httpssuprnaissse","title":"1. Go to https://supr.naiss.se/","text":"

Example SUPR NAISS main page

"},{"location":"getting_started/join_existing_project/#2-click-on-projects","title":"2. Click on 'Projects'","text":"

On the main page, click on 'Projects'

On the main page, click on 'Projects'

"},{"location":"getting_started/join_existing_project/#3-scroll-to-request-membership-in-project","title":"3. Scroll to 'Request Membership in Project'","text":"

At the 'Projects' page, scroll down to 'Request Membership in Project'.

At the 'Projects' page, scroll down to 'Request Membership in Project'

This is the 'Request Membership in Project' section:

At the 'Projects' page, here is the 'Request Membership in Project'

"},{"location":"getting_started/join_existing_project/#4-search-for-a-project","title":"4. Search for a project","text":"

At 'Request Membership in Project' in the 'Projects' page, enter a search term and click 'Search for project'

At 'Request Membership in Project' in the 'Projects' page, enter a search term and click 'Search for project'. In this example, the search term is 'DNA'

"},{"location":"getting_started/join_existing_project/#5-search-for-a-project","title":"5. Search for a project","text":"

At the 'Request Membership in Project' section, click on 'Request' for the project you want to request membership of.

At the 'Request Membership in Project' section, click on 'Request' for the project you want to request membership of.

"},{"location":"getting_started/join_existing_project/#6-wait-for-email","title":"6. Wait for email","text":"

After your request, the PI of the project will receive an email and will accept or reject your proposal.

"},{"location":"getting_started/linux/","title":"Linux","text":"
  • The \"operating system\" of the UPPMAX and most of the other clusters is Linux.

Questions

  • What is Linux?
  • How to use the command line?

Objectives

  • We'll briefly get an overview of Linux
  • How the command line works
  • Some text editors
  • Things to be aware of
Want a video?

Here is video that gives an introduction to Linux

"},{"location":"getting_started/linux/#what-is-linux","title":"What is Linux?","text":"
  • In everyday terms: the Linux operating system is a UNIX-like and UNIX-compatible operating system.
  • Linux is a \"kernel\" on which many different programs can run.
  • The shell (bash, sh, ksh, csh, tcsh and many more) is one such program.
  • Actually, for it to be an OS, it is supplied with GNU software and other additions, giving us the name GNU/Linux.
    • Linux naming controversy
  • Linux has a multiuser platform at its base, which means permissions and security come easily.
"},{"location":"getting_started/linux/#linux-comes-in-different-distributions-dialects-or-say-flavours","title":"Linux comes in different distributions, dialects or, say, flavours","text":"
  • UPPMAX runs CentOS and RedHat

Local Linux environment

  • You may sometimes benefit from having a local Linux environment.
  • Examples:
    • Mimic cluster environment to work with your local files and data as on the Cluster
    • get used to Linux (!)
  • macOS is UNIX-based and very Linux-like
  • Windows requires WSL (Windows Subsystem for Linux)
For Windows users who want to get started with WSL (not covered here)
  • Install WSL (Windows Subsystem for Linux)

    • https://docs.microsoft.com/en-us/windows/wsl/install-win10 (Links to an external site.)
    • Don\u2019t forget to update to WSL2
  • Install a distribution or an SSH (secure shell) program

    • a distribution such as Ubuntu, or
    • (recommended) an SSH program such as MobaXterm
    • https://mobaxterm.mobatek.net/ (Links to an external site.)
      • its sftp pane makes it easy to move, upload and download files.
  • You may want to check this webpage as well!
    • https://hackmd.io/@pmitev/Linux4WinUsers (Links to an external site.)
"},{"location":"getting_started/linux/#using-the-command-line","title":"Using the command line","text":"

Below usage of the command line is discussed in text. If you prefer video, here is how to use the command-line on the UPPMAX Bianca cluster.

"},{"location":"getting_started/linux/#command-line-with-bash-bourne-again-shell","title":"Command line with bash (Bourne Again Shell)","text":"
  • A Unix shell and command language.
  • Often default shell
  • The command-line interface: the bash prompt $
  • bash can be seen as a program that finds and runs other programs
  • bash is a scripting language that is referred to as a shell
    • (because it sits around the kernel, making it easy to interact with)
"},{"location":"getting_started/linux/#the-prompt","title":"The prompt","text":"
[info]$ word1 word2 word3 [...]\n
  • [info] is configurable, and usually tells you who you are, on what system, and where in the file system.

    • Example:
    [bjornc@rackham3 linux_tutorial]$\n
    • For changing info (only for advanced users!): How to Change / Set up bash custom prompt
    • The program to run is the first word
    • All words are separated by spaces

"},{"location":"getting_started/linux/#example-bash-command","title":"Example bash command","text":"
  • The terminal screen shows an example bash command.
"},{"location":"getting_started/linux/#tab-completion","title":"Tab Completion","text":"
  • Whenever you\u2019re writing a path or filename on the bash prompt, you can strike the \u2018tab\u2019 key to ask Bash to complete what you\u2019re writing.

  • Get in the habit of this \u2014 it will save you many hours!

"},{"location":"getting_started/linux/#editing-files","title":"Editing files","text":"

To edit files, you will use a text editor. The UPPMAX HPC clusters have multiple text editors installed, which are described at the UPPMAX 'Text editors' page here.

Example

Start nano and save a file called first.txt

$ nano first.txt\n
  • Type test text
  • End and save with <ctrl>-X followed by Y and <enter>.
"},{"location":"getting_started/linux/#typical-sources-of-error","title":"Typical sources of error","text":"

Warning

  • Capitalization matters in file names and program names
  • Spaces matter.
    • Always have a space after the program name.
    • Don\u2019t add spaces within file names.
  • Check that you are in the right place in the file system.
  • File permissions. Check that the right read, write and execute permissions are set. See the next session.
"},{"location":"getting_started/linux/#caution","title":"Caution","text":"

Warning

  • There is no undo for:
    • copy (cp),
    • move (mv), and
    • remove (rm).
  • Beware of overwriting files and deleting the wrong ones.

Tip

    Tip: make \"rm\" ask if you really want to erase:

    • Within a session: type in the command prompt

      alias rm='rm -i'

    • Override the asking with

      rm -f <file>

    • Edit the file .bashrc in your home directory, adding the alias line, to make this happen every time you start a session.

  • This will also work for mv and cp!
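A minimal sketch of the lines one could add to .bashrc (assuming you want interactive prompts for all three commands):

alias rm='rm -i'\nalias mv='mv -i'\nalias cp='cp -i'\n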

Note

  • If you do destroy your data, email UPPMAX support; we may be able to help.

Keypoints

  • The Linux operating system is a UNIX-like and UNIX-compatible operating system.
  • Typical command: $ program word1 word2 word3 [\u2026]
  • Use text editors to edit files
  • Tips
    • use Tab completion
    • capitalization and spaces matter
    • no undo for copying, moving and removing
      • Solution: alias rm='rm -i'
"},{"location":"getting_started/linux/#links","title":"Links","text":"
  • A free online book about Linux: 'The Linux Command Line'.
"},{"location":"getting_started/linux_basics/","title":"Basic toolkit","text":"

Objectives

  • Let's dig into the most important BASH commands
  • We'll do a type-along session
Like videos?

Below, the usage of the command line is discussed in text. If you prefer video, here is how to use the command line on the UPPMAX Bianca cluster.

"},{"location":"getting_started/linux_basics/#we-will-cover-these-commands","title":"We will cover these commands","text":""},{"location":"getting_started/linux_basics/#navigation-and-file-management","title":"Navigation and file management","text":"
  1. pwd \u2003print working directory
  2. ls \u2003list content
  3. cd \u2003change directory
  4. mkdir \u2003make directory
  5. cp \u2003copy
  6. scp \u2003securely remotely copy
  7. mv \u2003move
  8. rm \u2003remove
  9. rmdir \u2003remove empty directory
"},{"location":"getting_started/linux_basics/#read-files-and-change-file-properties","title":"Read files and change file properties","text":"
  1. cat \u2003print content on screen
  2. head \u2003print first part
  3. tail \u2003print last part
  4. less \u2003browse content
  5. tar \u2003compress or extract file
  6. chmod \u2003change file permissions
  7. man \u2003info about a command
"},{"location":"getting_started/linux_basics/#file-system-navigation","title":"File system Navigation","text":""},{"location":"getting_started/linux_basics/#pwd-where-are-you-now-print-name-of-currentworking-directory","title":"pwd \u2014 where are you now? \u201cPrint name of current/Working Directory`","text":"
pwd\n\npwd -P\n
  • -P gives you the physical path,
    • i.e. it ignores how you got there and resolves any symbolic links
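A minimal sketch, assuming /proj/myproject is a symbolic link to a physical directory /crex/proj/myproject (both names are made up):

$ cd /proj/myproject\n$ pwd\n/proj/myproject\n$ pwd -P\n/crex/proj/myproject\n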
"},{"location":"getting_started/linux_basics/#ls-list-directory-contents","title":"ls \u2014 list directory contents","text":"

Type ls to display the contents of the current directory.

ls -a\n

-a also shows hidden files and directories.

ls -l\n

-l gives you a long, detailed listing.

ls -lt\n

-lt sorts things by time modified.

ls -lrt\n

-r gives reversed order, so in this case the newest is on the last line.

man ls\n
  • for complete information about a command.
  • TIP: man <command> works for almost any command!
    • scroll with arrows and quit with q.
"},{"location":"getting_started/linux_basics/#cd-change-the-shell-working-directory","title":"cd \u2014 Change the shell working Directory","text":"
  • To change directory, use cd <target>

Warning

  • Some of the following steps will only be available for the Introduction course members.
  • These involve the `/proj/introtouppmax` directory
cd /proj/introtouppmax\n\npwd\n\nls\n\ncd labs\n\npwd\n

Challenge

  • Experiment with cd
  • Try adding <spaces> or extra / in various places
  • Use tab completion to avoid typos and typing ls a lot

  • Figure out the use of the following:

$ cd -\n\n$ cd ..\n\n$ cd\n\n$ cd ~\n
Solution
  • cd - : goes back to your last directory

  • cd .. : goes a level up in the hierarchy

  • cd : goes to home directory

  • cd ~ : also goes to home directory

"},{"location":"getting_started/linux_basics/#copy-create-move","title":"Copy, Create, Move","text":""},{"location":"getting_started/linux_basics/#mkdir-make-directories","title":"mkdir \u2014 make directories","text":"

Warning

  • Make sure you\u2019re in your home directory by cd ~
  • Create a new directory uppmax-intro
cd ~\nmkdir uppmax-intro\n
  • Go in there:
cd uppmax-intro/\n
"},{"location":"getting_started/linux_basics/#cp-copy-files-and-directories","title":"cp \u2014 copy files and directories","text":"
  • Copy files with: cp <source> <target>
  • Set the target to . to keep the file name and place the copy in the present directory.
cp /proj/introtouppmax/labs/linux_tutorial/ .\n
  • Well, that didn\u2019t work. What does the error say?
  • So... try
cp -r /proj/introtouppmax/labs/linux_tutorial/ .\n

-r is for recursive, meaning including files and subdirectories!

  • Move to your just created linux_tutorial/
cd linux_tutorial\n
  • Make a copy of the file newfile in the same directory:
cp newfile copyfile\n
"},{"location":"getting_started/linux_basics/#scp-secure-copy-remote-file-copy-program","title":"scp \u2014 secure copy (remote file copy program)","text":"
  • Linux/MacOS: To copy data to/from Rackham, you can use scp from the terminal on your local machine:
"},{"location":"getting_started/linux_basics/#download-from-rackham","title":"Download from Rackham","text":"
  • Download
[bob@macbook]$ scp bob@rackham.uppmax.uu.se:~/mydata copyofmydata\n\n[bob@macbook]$ scp bob@rackham.uppmax.uu.se:~/mydata .                      # (keeping file name)\n

Example

Download the file first.txt

  • In your local terminal:
[bob@macbook]$ scp <username>@rackham.uppmax.uu.se:~/first.txt .                      # (keeping file name)\n
"},{"location":"getting_started/linux_basics/#upload-to-rackham","title":"Upload to Rackham","text":"
  • Upload from the present directory on your local machine to your home directory on the cluster.
    • Example:
[bob@macbook]$ scp myinput bob@rackham.uppmax.uu.se:~/copyofmyinput\n\n[bob@macbook]$ scp myinput bob@rackham.uppmax.uu.se:~/                      # (keeping filename)\n

Example

upload the file first.txt after some modification

  1. Open the file you just downloaded in any editor.
  2. Add a row, like: A new row
  3. Save and quit.
  4. Upload your file but save it as second.txt on Rackham. In your local terminal:
[bob@macbook]$ scp first.txt <username>@rackham.uppmax.uu.se:~/second.txt                     # (new filename)\n

See

  • Rackham file transfer using scp
"},{"location":"getting_started/linux_basics/#mv-moverename-file","title":"mv \u2014 move/rename file","text":"
  • Moving files works just like copying files:
  • mv <source> <target>
  • Move the copy you just made to another place:
mv copyfile ../\n
  • Rename it.
mv ../copyfile ../renamedfile\n
"},{"location":"getting_started/linux_basics/#archiving","title":"Archiving","text":""},{"location":"getting_started/linux_basics/#tar-archiving-and-compression","title":"tar \u2014 archiving and compression","text":"
  • We\u2019re going to need more files. Let's extract the tar.gz file (tarred and gzipped file)
tar -vxzf files.tar.gz\n
  • The flags mean: v (verbosely), x (extract), z (gzipped), f (filename)
  • Order of flags may matter!
    • f should be at the start or at the end!
  • You should see a list of files being extracted

Tip

  • To compress, use the flag -c instead of -x
$ tar -cvzf <tar file> <path/to/directory/file(s)-or-directory>\n
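For example, a sketch (the archive name is made up):

$ tar -cvzf my_archive.tar.gz linux_tutorial/\n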
"},{"location":"getting_started/linux_basics/#deleting","title":"Deleting","text":""},{"location":"getting_started/linux_basics/#rm-delete-files-or-directories","title":"rm \u2014 delete files or directories","text":"

Note

  • Tip: make \"rm\" ask if you really want to erase:
  • Within a session: Type in the command prompt
alias rm='rm -i'\n
  • Override the asking with
rm -f <file>\n
  • Do you want this to be the case every time you start a new session?
    • Edit the file .bashrc in your home directory by adding the above alias line (on any line but the first).
  • These steps will also work for mv and cp.
  • Deleting files works just like copying or moving them: rm <target>

    • Try it out:
rm ../renamedfile\n\nrm this_is_empty\n
  • hmmmm...
"},{"location":"getting_started/linux_basics/#rmdir-delete-an-empty-directory","title":"rmdir \u2014 delete an empty directory","text":"
  • We need another command to delete directories
rmdir this_is_empty\n\nrmdir this_has_a_file\n
  • Problem again??

  • Is there a way to use rm to delete directories?

Solution

  • Recursive commands -r are applied to directories and their contents
$ rm -r this_has_a_file\n
"},{"location":"getting_started/linux_basics/#help","title":"Help","text":""},{"location":"getting_started/linux_basics/#man-manual-look-up-the-right-flags","title":"man \u2014 manual, look up the right flags","text":"
  • Nobody can remember whether it\u2019s -R or -r for recursive, or if -f lets you choose a file or forces an action.
man ls\n
  • shows you how to use ls and all its options
  • Type /<keyword> to search for a keyword, use n (forward) and N (backward) to scan through hits.
  • Scroll with arrows.
  • Type q to quit.

Challenge

  • Spend some time now to browse the man pages for the commands you\u2019ve just learned!
"},{"location":"getting_started/linux_basics/#lets-get-wild-with-wildcards","title":"Let\u2019s get wild with Wildcards","text":"
ls many_files\n\nls many_files/*.txt\n\nls many_files/file_1*1.docx\n
  • Want to clean out temporary files ending in .tmp in all the subdirectories?

Warning

  • It could be wise to do ls -a */*.tmp first to see what will be deleted...
$ rm */*.tmp\n

Challenge

  • Exercise: Create a new directory and move all .txt files in many_files to it.
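Solution (a sketch; the directory name txt_files is made up)

mkdir txt_files\n\nmv many_files/*.txt txt_files/\n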
"},{"location":"getting_started/linux_basics/#reading-files","title":"Reading files","text":"
  • In Linux, you can (if you wish) also display files without being able to change them
cd old_project\n\nls\n
  • Hmm, which of these files are useful?
"},{"location":"getting_started/linux_basics/#cat-concatenate-files-and-print-on-the-standard-output","title":"cat - concatenate files and print on the standard output","text":"
  • cat dumps the contents of files to the terminal as text
cat the_best\n
  • Yummy!
cat a\n
  • What's this????

  • Concatenate files with this wizardry:

cat a the_best > combinedfiles.txt\n
  • File a is written first and the_best is appended
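A related sketch: to append to an existing file instead of overwriting it, use >>:

cat a >> combinedfiles.txt        # appends the contents of a to the end\n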
"},{"location":"getting_started/linux_basics/#head-display-the-top-heading-of-a-file","title":"head \u2014 display the top (heading) of a file","text":"
head a\n
  • You can choose how many lines to display (default 10)
head -n 4 a\n
"},{"location":"getting_started/linux_basics/#tail-display-the-end-of-a-file","title":"tail \u2014 display the end of a file","text":"
  • Tail is the same as head, but for the other end.
tail -n 5 a\n
  • Handy to look at log files or to figure out the structure of a text file.
"},{"location":"getting_started/linux_basics/#less-read-a-whole-file","title":"less \u2014 read a whole file","text":"
  • cat doesn\u2019t really work for long files
 less a\n
  • Search with /<keyword> and n/N
  • Hit q to quit.
  • scroll with arrows.
  • man uses less!

    \u201cless is more\u201d

"},{"location":"getting_started/linux_basics/#history","title":"History","text":"
  • history shows previous commands
  • You can rerun earlier commands by:
    • copy-pasting and pressing <enter>
    • !990 will run the command on line 990 of the last history output.
  • Search for earlier commands you just remember parts of:
    • history | grep 'jobstats'
  • More info
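A sketch of how this could look (the history numbers, commands and output are hypothetical):

$ history | tail -n 3\n  988  cd linux_tutorial\n  989  ls -l\n  990  pwd\n$ !990\npwd\n/home/sven/linux_tutorial\n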
"},{"location":"getting_started/linux_basics/#file-permissions","title":"File permissions","text":""},{"location":"getting_started/linux_basics/#example","title":"Example","text":"
$ ls -l\n\ndrwxrwxr-x 2 marcusl marcusl 4096 Sep 19 2012 external_hdd\n-rwxr-xr-x 1 marcusl marcusl 17198 Jul 16 14:12 files.tar.gz\n
  • Leading symbol:
    • d directory
    • - regular file
    • l symbolic link (more on this tomorrow)
    • Others exist, but you can ignore them for now
$ ls -l\n\n  drwxrwxr-x 2 marcusl marcusl 4096 Sep 19 2012 external_hdd\n\n  -rwxr-xr-x 1 marcusl marcusl 17198 Jul 16 14:12 files.tar.gz\n
  • Three sets of \u201crwx\u201d permissions

    • rwx: read, write, execute
    • User: the user account that owns the file (usually the one that created it)
    • Group: the group that owns the file (usually the project group in /proj/xyz or the user\u2019s group elsewhere)
    • Others: everyone else on the system (literally a thousand strangers)
  • r - read

    • Files: Read the contents of the file
    • Directories: List the files in the directory
  • w - write

    • Files: Modify the file
    • Directories: Add, rename, or delete files in the directory
  • x - execute

    • Files: Run the file as a program
    • Directories: Traverse the directory (e.g. with cd)
"},{"location":"getting_started/linux_basics/#changing-permissions","title":"Changing permissions","text":"

chmod \u2014 change file mode bits

If you own the file or directory (i.e. you created it), you can modify its permissions.

Common issues

  • Files with w can be modified and destroyed by accident. Protect your data!
  • If you want to share data or scripts with a person not in your project (e.g. support staff like me), you can!
  • If you want to keep non-members from even seeing which files you have, you can!
"},{"location":"getting_started/linux_basics/#syntax","title":"Syntax","text":"

chmod <mode> <files>

  • <mode> is of the form: For whom, Modify, What permission(s)
  • For whom?
    • u: user/owner
    • g: group, often the members to a certain project
    • o: others
    • a: all
    • if not set, changes are applied for user AND group
  • Modify?
    • +: add permissions,
    • -: remove
    • =: set equal to
      • = usually causes unmentioned bits to be removed except that a directory's unmentioned set user and group ID bits are not affected.
  • What permissions?
    • r, w, x, i.e. the actual permission
"},{"location":"getting_started/linux_basics/#examples","title":"Examples","text":"
  • <mode> can be e.g.:

    • u+x : lets you (the owner) run a script you just wrote
    • -w : no write permissions for owner+group
      • warning: if w was already set for others, it will be kept!!
    • +rw : lets user and group members read and edit this file; others are unaffected unless already set
    • =xw : lets group members go into your directory and put files there, but not see which files are there; others are not affected
    • a=xw : sets xw for everyone
  • chmod takes flags as usual, e.g.

    • -R for recursive (i.e. all files and sub-directories therein)
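A few complete commands as sketches (the file and directory names are made up):

chmod u+x my_script.sh          # let the owner run the script\nchmod -R g+rw shared_results/   # group members may read and edit everything in shared_results\n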

chmod 755 style \u2014 binary sum \u2014 octal bit mask

  • Online, you will come across e.g. chmod 755 <file/dir>. What does this mean? It\u2019s an \"octal bit mask\":

  • Each digit corresponds to the binary sum for the owner, group and others, respectively.

    • 7 = 4 + 2 + 1 = r + w + x : all permissions
    • 5 = 4 + 0 + 1 = r + + x : read and execute permission
  • 755 then means all permissions for the owner, but no write permission for the group and all others

  • What number would rw be?

Solution

6
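As a sketch (hypothetical file name), 644 then gives rw to the owner and read-only to group and others:

chmod 644 notes.txt        # results in: -rw-r--r--\n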

chmod \u2014 Hands-on
  • In your locally created linux_tutorial directory, find important files and old saved data that you wouldn\u2019t want to lose (imagine).
  • Directories: important_results/, old_project/
  • File: last_years_data
  • Use chmod to remove write permission from those files and directories (use the -R flag (not -r) to also do the files in the directories).
  • Take a moment to play around with chmod and explore the effects of permissions on files and directories.
Solution
$ chmod -R -w <target>\n
"},{"location":"getting_started/linux_basics/#links","title":"Links","text":"
  • A free online book about Linux: 'The Linux Command Line'.
"},{"location":"getting_started/login/","title":"Log in","text":"

One needs to log in to an UPPMAX cluster to use it.

There are two environments one can log in to:

  • a remote desktop environment
    • using a web browser
    • using a local ThinLinc client
  • a console environment, using an SSH client

The two environments to work on Bianca. At the left is a remote desktop environment. At the right is the console environment.

Because logging in differs between clusters, each cluster has its own login page:

  • Login to Bianca
  • Login to Rackham
  • Login to Snowy

Go to those pages for more details.

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

Other things to log in to, shown for completeness:

  • Login to Dardel (this is not an UPPMAX cluster)
  • Login to Transit (this is an UPPMAX service, not a cluster)
","tags":["login","log in","general"]},{"location":"getting_started/login_bianca/","title":"Log in to Bianca","text":"

The two environments to work on Bianca. At the left is a remote desktop environment. At the right is the console environment.

There are multiple UPPMAX clusters one can log in to. Here it is described how to log in to Bianca:

","tags":["login","log in","Bianca"]},{"location":"getting_started/login_bianca/#which-way-to-log-in-to-bianca","title":"Which way to log in to Bianca","text":"

After you've fulfilled all prerequisites for using Bianca, there are many ways to log in to Bianca.

Here is the decision tree, with more detailed explanation below it:

flowchart TD\n  in_sunet(A.Can you get inside the university networks?)\n  need_remote_desktop(B.Need/prefer a remote desktop?)\n  how_login(C.How to log in?)\n  need_remote_desktop_no_sunet(B.Need/prefer a remote desktop?)\n  how_login_no_sunet(C.How to log in?)\n\n  use_website[1.Use the Bianca remote desktop website]\n  use_password[2.Use a terminal and password to access Bianca directly]\n  use_ssh_keys[3.Use a terminal and SSH keys to access Bianca directly]\n\n  use_website_no_sunet[4.Use the Rackham remote desktop website]\n  use_password_no_sunet[5.Use a terminal and password via Rackham]\n  use_ssh_keys_no_sunet[Use a terminal and SSH keys via Rackham]\n\n  in_sunet --> |yes| need_remote_desktop\n\n  need_remote_desktop --> |no| how_login\n  need_remote_desktop --> |yes| use_website\n\n  how_login --> |Using a password| use_password\n  how_login --> |Using SSH keys| use_ssh_keys\n\n  in_sunet --> |no| need_remote_desktop_no_sunet\n\n  need_remote_desktop_no_sunet --> |no| how_login_no_sunet\n  need_remote_desktop_no_sunet --> |yes| use_website_no_sunet\n\n  how_login_no_sunet --> |Using a password| use_password_no_sunet\n  how_login_no_sunet --> |Using SSH keys| use_ssh_keys_no_sunet

Decision tree on how to log in to Bianca

Question A, 'Can you get inside the university networks?' is commonly answered 'yes' for anyone with an email address at a university in Sweden. The UPPMAX documentation on how to get inside the university networks should allow anyone to do so.

Question B, 'Need/prefer a remote desktop?' is about whether you prefer a visual/graphical environment to work with Bianca, which will be similar to what most of us are used to. A 'yes' is more suitable for new users, although it is considered a clunkier (it responds more slowly to user input) and clumsier (copy-pasting to it needs multiple mouse clicks) way to work. A 'no' is more suitable for users comfortable with a terminal, and works smoothly.

What does the Bianca remote desktop look like?

One can pick multiple remote desktop environments, such as GNOME and XFCE (and KDE, don't pick KDE!).

The Bianca XFCE remote desktop environment

A more populated Bianca XFCE remote desktop

  • A remote desktop environment, also called 'graphical environment', 'GUI environment', 'ThinLinc environment'
What does the Bianca console environment look like?

The Bianca console environment

  • A console environment, also called 'terminal environment' or 'terminal'

Question C, 'How to log in?' is about how you prefer to log in. The option 'Using a password' is more suitable for new users, as it is easy to set up and understand. However, one does need to type one's password every time one logs in. 'Using SSH keys' is harder to set up, yet more convenient.

Will a local ThinLinc client work too?

No.

One really can only access the Bianca remote desktop environment via the website.

Here are the ways to log in to Bianca:

  • 1. Use the Bianca remote desktop website
  • 2. Use a terminal and password to access Bianca directly
  • 3. Use a terminal and SSH keys to access Bianca directly
  • (not recommended) 4. Use the Rackham remote desktop website to log in to Bianca's remote desktop environment
  • (not recommended) 5. Use a terminal and password via Rackham to log in to Bianca's console environment
  • (not recommended) 6. Use a terminal and SSH keys via Rackham to log in to Bianca's console environment

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

","tags":["login","log in","Bianca"]},{"location":"getting_started/login_bianca_console_password/","title":"Login to the Bianca console environment with a password","text":"

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a terminal and a password:

  • Procedure: describes the procedure
  • Troubleshooting: describes how to fix errors
","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#procedure","title":"Procedure","text":"Video: how to use a terminal and SSH to access the Bianca console environment

This video shows how to use a terminal and SSH to access the Bianca console environment: YouTube

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#1-get-inside-the-university-networks","title":"1. Get inside the university networks","text":"

Get inside the university networks.

Forgot how to get within the university networks?

See the 'get inside the university networks' page here

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#2-use-ssh-to-log-in","title":"2. Use ssh to log in","text":"

From a terminal, use ssh to log in:

ssh [user]-[project name]@bianca.uppmax.uu.se\n

For example:

ssh sven-sens2023598@bianca.uppmax.uu.se\n
What does this look like (when inside of SUNET)?
ssh sven-sens2023598@bianca.uppmax.uu.se\n

After this, you will be asked for a password. Go to the next step.

What does it look like when outside of SUNET?
$ ssh sven-sens2023598@bianca.uppmax.uu.se\n

After which there is only waiting...

Why no -A?

On Bianca, one can use -A:

ssh -A username@bianca.uppmax.uu.se\n

this option is only useful when you want to log in to Bianca via the console using an SSH key. As we here use passwords (i.e. no SSH keys) to access Bianca, -A is unused and hence we simplify this documentation by omitting it.

Why no -X?

On Rackham, one can use -X:

ssh -X username@rackham.uppmax.uu.se\n

However, on Bianca, this so-called X forwarding is disabled. Hence, we do not teach it :-)

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#3-type-your-uppmax-password-with-2fa","title":"3. Type your UPPMAX password with 2FA","text":"

Type your UPPMAX password, directly followed by the UPPMAX 2-factor authentication number, for example verysecret678123, then press enter. In this case, the password is verysecret and 678123 is the 2FA number.

What does this look like?
sven@sven-N141CU:~/GitHubs/UPPMAX-documentation/docs/getting_started$ ssh sven-sens2023598@bianca.uppmax.uu.se\nsven-sens2023598@bianca.uppmax.uu.se's password: \n

After this, you'll be asked for another password. Go to the next step.

After authenticating with the UPPMAX password and 2FA, you are logged in on Bianca's shared network, on a so-called 'jumphost'.

However, you will still need to log in to your own private virtual project cluster. As you are already properly authenticated (i.e. using an UPPMAX password and UPPMAX 2FA), you don't need 2FA anymore.

What is a virtual project cluster?

As Bianca holds sensitive data, by regulations, each Bianca project must be isolated from the others and is not allowed to, for example, share the same memory.

One way to achieve this would be to build one HPC cluster per project. While this would guarantee isolated project environments, it would be quite impractical.

Instead, we create isolated project environments by using software that creates so-called virtual clusters, which behave as if they were physical clusters. Like a physical cluster, a virtual cluster has a guaranteed isolated project environment.

When you login to Bianca's shared network, you will get a message of your project's login node status. It can be up and running or down. If it is down, the virtual cluster is started, which may take some minutes.

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#4-type-your-uppmax-password","title":"4. Type your UPPMAX password","text":"

Type your UPPMAX password, for example verysecret

What does this look like?
Last login: Thu Sep 19 08:54:12 2024 from vpnpool188-186.anst.uu.se\nNotice(s) for upcoming maintenance on bianca:\n\n  Lost e-mail support tickets, working again\n  http://status.uppmax.uu.se/2024-09-19/lost-tickets/\n\n****************************************************************************\n* Login node up and running. Redirecting now!                              *\n* Notice! No second factor if you use password.                            *\n* If you use ssh keys, you can get rid of this (second) prompt.            *\n****************************************************************************\n\nsven@sens2023598-bianca.uppmax.uu.se's password: \n
","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#5-you-are-in","title":"5. You are in","text":"

Enjoy! You are in! Or, to be precise, you are on the login node of your own virtual project cluster.

What does this look like?

(Here an ASCII-art welcome banner is shown, stating among other things: System: sens2023598-bianca, User: sven)

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

By default, this node has one core, hence if you need more memory or more CPU power, you submit a job (interactive or batch), and an idle node will be moved into your project cluster.

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#_1","title":"Login to the Bianca console environment with a password","text":"
    User Guides: http://www.uppmax.uu.se/support/user-guides\n    FAQ: http://www.uppmax.uu.se/support/faq\n\n    Write to support@uppmax.uu.se, if you have questions or comments.\n


","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#troubleshooting","title":"Troubleshooting","text":"

Here are some common errors and their solutions:

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password/#permission-denied-please-try-again","title":"Permission denied, please try again","text":"
Permission denied, please try again.\n

Here are the questions we will ask to solve your problem:

flowchart TD\n    error[Permission denied, please try again.]\n    correct_password[Is your password correct?]\n    added_2fa[Have you added a 2FA number at the end of your password?]\n    added_correct_2fa[Have you added the correct 2FA number at the end of your password?]\n    in_sunet[Are you within the university networks?]\n    active_bianca_project[Is that Bianca project active?]\n    member_of_bianca_project[Are you a member of that Bianca project]\n    contact_support[Contact support]\n\n    error --> correct_password\n    error --> in_sunet\n\n    in_sunet --> |yes| active_bianca_project\n\n    correct_password --> |yes| added_2fa\n    added_2fa --> |yes| added_correct_2fa\n    active_bianca_project -->  |yes| member_of_bianca_project\n    member_of_bianca_project --> |yes| contact_support\n    added_correct_2fa --> |yes| contact_support
How do I know my password is correct?

You don't.

It could be a typo: you don't see your password when you type (this is a security measure), so a typo is likely to occur. Also check if 'Caps Lock' is off.

It could be that you've forgotten your password. That can happen to all of us. You can then reset your password at https://suprintegration.uppmax.uu.se/getpasswd

What do you mean 'Have you added a 2FA number at the end of your password?'?

When you type your password, this needs to be followed by a two-factor authentication number.

For example, if your password is verysecret and 314159 is the 2FA number, you should type verysecret314159

What is the correct 2FA number?

The UPPMAX one, titled [username]@UPPMAX, for example sven@UPPMAX.

When using UPPMAX, one needs to create other 2FAs too, such as for SUPR or the Uppsala VPN. Don't use those numbers to log in to Bianca.

How do I know if I am within the university networks?

If you log in via eduroam, you are within the university networks.

When unsure, go to the Bianca remote desktop website at https://bianca.uppmax.uu.se: if this page does not load, you are outside of the university networks.

See 'How to get inside of the university networks' if you are outside of the university networks.

How do I know if the Bianca project is active?

A quick way to confirm your Bianca project is active: go to https://bianca.uppmax.uu.se and type your username. If the project is displayed, it is active.

To confirm your project is active or inactive, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see if your project is active.

How do I know if I am a member of the Bianca project?

A quick way to confirm you are a member of the Bianca project: go to https://bianca.uppmax.uu.se and type your username. If the project is displayed, you are a member of the Bianca project.

To confirm whether you are a member, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see which projects you are a member of.

See the UPPMAX page on contacting support on how to contact us.

","tags":["login","log in","Bianca","console","terminal","password"]},{"location":"getting_started/login_bianca_console_password_no_sunet/","title":"Login to the Bianca console environment with a password from outside of the Swedish university networks","text":"

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a terminal and a password from outside of the Swedish university networks.

","tags":["login","log in","Bianca","console","terminal","password","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_password_no_sunet/#procedure","title":"Procedure","text":"","tags":["login","log in","Bianca","console","terminal","password","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_password_no_sunet/#1-log-in-to-rackhams-console-environment","title":"1. Log in to Rackham's console environment","text":"

See the UPPMAX documentation on how to log in to Rackham how to do so.

","tags":["login","log in","Bianca","console","terminal","password","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_password_no_sunet/#2-from-rackham-log-in-to-bianca","title":"2. From Rackham, log in to Bianca","text":"

From Rackham (which is inside of the university networks), log in to Bianca. See the UPPMAX documentation on how to log in to Bianca how to do so.

","tags":["login","log in","Bianca","console","terminal","password","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_ssh_key/","title":"Login to the Bianca console environment using SSH keys","text":"

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a terminal and an SSH key pair.

","tags":["login","log in","Bianca","console","terminal","SSH"]},{"location":"getting_started/login_bianca_console_ssh_key/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

When inside SUNET, one can access a Bianca console environment using SSH and SSH keys.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

This is considered a bit harder to set up, but then one only needs to type one password to log in to Bianca. If you don't mind typing your UPPMAX password twice, an easier setup is to log in to the Bianca console environment with a password.

","tags":["login","log in","Bianca","console","terminal","SSH"]},{"location":"getting_started/login_bianca_console_ssh_key/#2-use-ssh-to-log-in","title":"2. Use ssh to log in","text":"

From a terminal, use ssh to log in:

ssh -A [user]-[project name]@bianca.uppmax.uu.se\n

For example:

ssh -A sven-sens2023598@bianca.uppmax.uu.se\n
What does it look like when outside of SUNET?

Here you can see how this looks when outside of SUNET.

Spoiler: quite dull, as nothing happens until there is a timeout.

Why no -X?

On Rackham, one can use -X:

ssh -X username@rackham.uppmax.uu.se\n

However, on Bianca, this so-called X forwarding is disabled. Hence, we do not teach it :-)

","tags":["login","log in","Bianca","console","terminal","SSH"]},{"location":"getting_started/login_bianca_console_ssh_key/#3-type-your-uppmax-password-and-2fa","title":"3. Type your UPPMAX password and 2FA","text":"

Type your UPPMAX password, directly followed by the UPPMAX 2-factor authentication number, for example verysecret678123, then press enter. In this case, the password is verysecret and 678123 is the 2FA number.

","tags":["login","log in","Bianca","console","terminal","SSH"]},{"location":"getting_started/login_bianca_console_ssh_key/#4-you-are-in","title":"4. You are in","text":"

Enjoy! You are in! To be precise, you are on a Bianca login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

In a Bianca console environment:

  • Text display is limited to 50kBit/s. This means that if you create a lot of text output, you will have to wait some time before you get your prompt back.
  • Cut, copy and paste work as usual. Be careful to not copy-paste sensitive data!
Why does one need two passwords?

The first password is needed to get into the shared Bianca environment. This password contains both an UPPMAX password and an UPPMAX 2FA number.

The second password is needed to go to the login node of a project's virtual cluster.

flowchart TD\n\n    %% Give a white background, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n    classDef focus_node fill:#fff,color:#000,stroke:#000,stroke-width:4px\n\n    subgraph sub_bianca_shared_env[Bianca shared network]\n      bianca_shared_console[Bianca console environment login]\n      bianca_shared_remote_desktop[Bianca remote desktop login]\n      subgraph sub_bianca_private_env[The project's private virtual project cluster]\n        bianca_private_console[Bianca console environment]\n        bianca_private_remote_desktop[Bianca remote desktop]\n        bianca_private_terminal[Terminal]\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    %% style sub_inside fill:#fcc,color:#000,stroke:#fcc\n    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc\n    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc\n\n    %% Shared Bianca\n    bianca_shared_console --> |UPPMAX password|bianca_private_console\n    bianca_shared_remote_desktop-->|UPPMAX password|bianca_private_remote_desktop\n\n    %% Private Bianca\n    bianca_private_console---|is a|bianca_private_terminal\n    bianca_private_remote_desktop-->|must also use|bianca_private_terminal
","tags":["login","log in","Bianca","console","terminal","SSH"]},{"location":"getting_started/login_bianca_console_ssh_key_no_sunet/","title":"Login to the Bianca console environment using SSH keys from outside of the Swedish university networks","text":"

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a terminal and an SSH key pair from outside of the Swedish university networks.

","tags":["login","log in","Bianca","console","terminal","SSH","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_ssh_key_no_sunet/#procedure","title":"Procedure","text":"","tags":["login","log in","Bianca","console","terminal","SSH","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_ssh_key_no_sunet/#1-log-in-to-rackhams-console-environment","title":"1. Log in to Rackham's console environment","text":"

See the UPPMAX documentation on how to log in to Rackham how to do so.

","tags":["login","log in","Bianca","console","terminal","SSH","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_console_ssh_key_no_sunet/#2-from-rackham-log-in-to-bianca","title":"2. From Rackham, log in to Bianca","text":"

From Rackham (which is inside of the university networks), log in to Bianca. See the UPPMAX documentation on how to log in to Bianca how to do so.

","tags":["login","log in","Bianca","console","terminal","SSH","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_local_thinlinc_client/","title":"Login to the Bianca remote desktop environment via a ThinLinc client","text":"

There are multiple ways to log in to Bianca. Accessing Bianca's remote desktop environment via a local ThinLinc client, however, is impossible: one can only access the Bianca remote desktop environment via a website.

This page is here for UPPMAX staff only (no page links to this one), so they remember this is impossible. See this Issue for a dialog.

"},{"location":"getting_started/login_bianca_remote_desktop_website/","title":"Log in to the Bianca remote desktop environment website","text":"

The Bianca remote desktop environment

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a remote desktop that is accessible from a web browser.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#procedure","title":"Procedure","text":"Prefer a video?

See this page explained in a YouTube video here

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

As Bianca is an HPC cluster for sensitive data, one needs to be within SUNET to be able to access her.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

Bianca does not support any so-called X forwarding (unlike Rackham), so instead UPPMAX maintains a website that uses ThinLinc to get a full remote desktop environment. All you should need is a rather modern browser on any platform.

What does it look like to try to access a remote desktop from outside of SUNET?

When accessing the Bianca UPPMAX login website from outside of SUNET, nothing will appear in your browser.

In this video you can see how this looks when outside of SUNET.

It looks quite dull, as nothing happens until there is a timeout.

Will a local ThinLinc client work too?

No.

One really can only access the Bianca remote desktop environment via a website

When inside SUNET, one can access a remote desktop environment using a website:

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#2-go-to-httpsbiancauppmaxuuse","title":"2. Go to https://bianca.uppmax.uu.se","text":"

When inside SUNET, in your web browser, go to https://bianca.uppmax.uu.se.

What does it look like when outside of SUNET?

When accessing the Bianca UPPMAX login website from outside of SUNET, nothing will appear in your browser.

In this video you can see how this looks when outside of SUNET.

It looks quite dull, as nothing happens until there is a timeout.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#3-fill-in-the-first-dialog","title":"3. Fill in the first dialog","text":"

Fill in the first dialog.

Do use the UPPMAX 2-factor authentication (i.e. not SUPR!)

How do I set up 2-factor authentication?

See the guide at 2-factor authentication to set up an UPPMAX 2-factor authentication method.

You really need to use the UPPMAX 2-factor authentication, i.e. not the SUPR one, to log in to Bianca.

Screenshot of a two-factor authentication app. Use the 2-factor authentication called 'UPPMAX' to access Bianca

What does that web page look like?

The first page of https://bianca.uppmax.uu.se

Sometimes a webpage will be shown that asks you to wait. Simply do that :-)

What does that web page look like?

No c Web Access active The login node for your project cluster is probably asleep. Boot initiated. The startup can take from 2 to 8 minutes.

This page will attempt to automatically reload. If nothing happens even after multiple minutes, you can do so manually. It is a bit more controlled in text mode.

When this takes a long time, your original second-factor code might expire. In that scenario, you'll be redirected to the first login page again.

This is the webpage that is shown when a login node needs to be created.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#4-fill-in-the-second-dialog-using-your-regular-password","title":"4. Fill in the second dialog, using your regular password","text":"

Fill in the second dialog, using your regular password (i.e. no need for two-factor authentication).

What does that web page look like?

The second Bianca remote desktop login dialog. Note that it uses ThinLinc to establish this connection

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#5-picking-a-remote-desktop-flavor-but-not-kde","title":"5. Picking a remote desktop flavor, but not KDE","text":"

When picking a remote desktop flavor, pick GNOME or XFCE, avoid picking KDE.

What does that look like?

Here you are told you will need to pick a remote desktop flavor

Here you are asked to pick a remote desktop flavor, with Xfce as the default. Pick any, except KDE.

Avoid choosing KDE

Avoid choosing the KDE desktop, as it gives problems when running interactive sessions.

Instead, we recommend GNOME or XFCE.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#6-you-are-in","title":"6. You are in","text":"

Enjoy! You are in: you are now on a Bianca login node.

How do I copy-paste text?

The Bianca remote desktop environment via a website uses ThinLinc.

At the ThinLinc page you can find how to work with its interface.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

What is the difference between 'disconnect session' and 'end session'?

'disconnect session' will save the current state of your session. When you connect again, you will get the remote desktop back in exactly in the same place you left the system. For example: if you were editing a file before disconnecting, your prompt will be in the same place you left it.

'end session' will not save the current state of your session. Instead, you will start with a clean slate at the next login.

Bianca automatically disconnects after 30 minutes of inactivity. In the future, it is possible that we implement some kind of \"automatic log out from active graphical session\".

flowchart TD\n\n    subgraph sub_inside[IP inside SUNET]\n\n      user(User)\n\n      subgraph sub_bianca_shared_env[Bianca shared network]\n        bianca_shared_remote_desktop[Bianca remote desktop login]\n        subgraph sub_bianca_private_env[The project's private virtual project cluster]\n          bianca_private_remote_desktop[Bianca remote desktop]\n\n          %% Ensure the innermost square gets big enough\n          END:::hidden\n        end\n      end\n    end\n\n    %% Inside SUNET\n    user-->|Bianca website, UPPMAX password and 2FA|bianca_shared_remote_desktop\n\n    bianca_shared_remote_desktop --> |UPPMAX password| bianca_private_remote_desktop
","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#troubleshooting","title":"Troubleshooting","text":"","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#access-denied","title":"Access denied","text":"How does that look like?

Contact support.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website/#authentication-failed","title":"Authentication failed","text":"How does that look like?

Contact support.

","tags":["login","log in","Bianca","remote desktop","website","URL","in","inside","within","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website_no_sunet/","title":"Log in to the Bianca remote desktop environment website from outside of the Swedish university networks","text":"

The Bianca remote desktop environment via the Rackham remote desktop environment

There are multiple ways to log in to Bianca.

This page describes how to log in to Bianca using a remote desktop that is accessible from a web browser from outside of the Swedish university networks.

","tags":["login","log in","Bianca","remote desktop","website","URL","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website_no_sunet/#procedure","title":"Procedure","text":"Prefer a video?

See this page explained in a YouTube video here

","tags":["login","log in","Bianca","remote desktop","website","URL","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website_no_sunet/#1-go-to-httpsrackham-guiuppmaxuuse","title":"1. Go to https://rackham-gui.uppmax.uu.se","text":"

In a web browser, go to https://rackham-gui.uppmax.uu.se.

  • In the first field, fill in your UPPMAX username, e.g. sven
  • In the second field, fill in your UPPMAX password (e.g. password) and your UPPMAX 2FA number (e.g. 123456) together, without a space (e.g. password123456)
What does that page look like?

After login, you will be on the Rackham remote desktop environment.

","tags":["login","log in","Bianca","remote desktop","website","URL","out","outside","SUNET","university networks"]},{"location":"getting_started/login_bianca_remote_desktop_website_no_sunet/#2-log-in-to-the-bianca-remote-desktop-environment-website","title":"2. Log in to the Bianca remote desktop environment website","text":"

In the web browser of the Rackham remote desktop environment (which is inside the university networks), follow the steps at how to log in to the Bianca remote desktop environment website.

","tags":["login","log in","Bianca","remote desktop","website","URL","out","outside","SUNET","university networks"]},{"location":"getting_started/login_dardel/","title":"Log in to Dardel","text":"

There are multiple clusters one can log in to. Here it is described how to log in to Dardel.

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#procedure","title":"Procedure","text":"Prefer a video?

Go to a YouTube video on how to log in to Dardel to view what to do from step 3 and onwards.

First, we are here to help. Please contact support if you run into problems when trying the guide below.

Note that step 1 requires some hours of waiting and step 2 requires an overnight wait.

flowchart TD\n  get_supr_project[1.Access to a SUPR project with Dardel]\n  get_pdc_account[2.Access to a PDC account]\n  create_ssh_key[3.Create temporary SSH keys]\n  add_ssh_key[4.Add the SSH keys to the PDC Login Portal]\n  login[5. Login]\n\n  get_supr_project --> |needed for| get_pdc_account\n\n  create_ssh_key --> |needed for| add_ssh_key\n  get_pdc_account --> |needed for| add_ssh_key\n  add_ssh_key --> |needed for| login

Overview of the migration process. Note that step 1 requires some hours of waiting and step 2 requires an overnight wait.

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#1-get-access-to-a-supr-project-with-dardel","title":"1. Get access to a SUPR project with Dardel","text":"

The first step is to get access to a SUPR project with Dardel. This is described at PDC's page on getting access to Dardel. You will get an email when you are added to a project; this can take some hours.

How do I know I have access to a Dardel project?

Log in to https://supr.naiss.se/. If there is a PDC project, you may have access to a project with Dardel.

Example user that has access to a PDC project

If you have a PDC project, it may still be one that does not use Dardel: click on the project to go to the project overview.

Example PDC project overview

From there, scroll down to 'Resources'. If you see 'Dardel' among the compute resources, you have confirmed you have access to a Dardel project.

Resources from an example PDC project

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#2-get-a-pdc-account-via-supr","title":"2. Get a PDC account via SUPR","text":"

Get a PDC account via SUPR. This is described at the PDC page on getting access. You will get a PDC account overnight.

How do I know I have a PDC account?

Log in to https://supr.naiss.se/ and click on 'Accounts' in the main menu bar at the left.

If you see 'Dardel' among the resources, and status 'Enabled' in the same row, you have a PDC account!

Example of a user having an account at PDC's Dardel HPC cluster

How do I find out my PDC username?

In the PDC login portal, after logging in, you can see your Dardel username in the top-right corner:

Example screenshot of the PDC login portal. The Dardel username of this user is svenbi

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#3-create-ssh-key-pair","title":"3. Create SSH key pair","text":"

Create an SSH key and add it to the PDC Login Portal.

  • Create the passwordless SSH key in a Linux terminal (e.g. from Rackham):
module load darsync\n
darsync sshkey\n
","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#4-add-the-public-key-to-the-pdc-login-portal","title":"4. Add the public key to the PDC Login Portal","text":"

When creating the SSH key pair, darsync will already display the public key.

If, however, you missed it, you can view the public SSH key again; in a terminal logged into Rackham:

cat ~/id_ed25519_pdc.pub\n
What does that look like?

The text will look similar to this:

ssh-ed25519 AAAA69Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se\n

Open the PDC Login Portal.

Follow our step-by-step instructions on how to add SSH keys.

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_dardel/#5-login","title":"5. Login","text":"
  • See the PDC documentation on 'SSH login'

On a terminal, do:

ssh -X [username]@dardel.pdc.kth.se\n

where [username] is your PDC username, for example ssh -X sven@dardel.pdc.kth.se.

Why the -X?

The -X is for so-called X forwarding. It allows you to view graphical things, such as plots, or to run graphical programs.

","tags":["login","log in","Dardel"]},{"location":"getting_started/login_rackham/","title":"Log in to Rackham","text":"

There are multiple UPPMAX clusters one can log in to. Here we describe how to log in to Rackham.

  • Prerequisites describes what is needed before one can access Rackham
  • Which way to log in?
    • Website
    • Terminal
    • Local ThinLinc client
","tags":["login","log in","Rackham"]},{"location":"getting_started/login_rackham/#which-way-to-login","title":"Which way to login?","text":"

There are multiple ways to log in to Rackham:

  • Website: remote desktop, no installation needed, slow
  • Terminal: console environment, recommended
  • Local ThinLinc client: remote desktop, recommended, needs installation

Here is a decision tree, to determine which way to log in:

flowchart TD\n  need_gui(Need to run a graphical program?)\n  use_terminal[Use a terminal]\n  use_website[Use the remote desktop website]\n  need_easy_or_speedy(Need easiest or fastest?)\n  use_local[Use a local ThinLinc client]\n\n  need_gui --> |no| use_terminal\n  need_gui --> |yes| need_easy_or_speedy\n  need_easy_or_speedy --> |easiest| use_website\n  need_easy_or_speedy --> |fastest| use_local\n\n  how_login(How to log in?)\n\n  use_password[Use password. Start here]\n  use_ssh_keys[Use SSH keys. No more password needed]\n\n  use_terminal --> how_login\n  how_login --> use_password\n  how_login --> use_ssh_keys

The procedures can be found at:

  • Login to the Rackham remote desktop environment using the website
  • Login to the Rackham console environment with a password. If you want to get rid of using a password every time, see login to the Rackham console environment with an SSH key
  • Login to the Rackham remote desktop environment using a local ThinLinc client

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

","tags":["login","log in","Rackham"]},{"location":"getting_started/login_rackham_console_password/","title":"Login to the Rackham console environment with a password","text":"

There are multiple ways to log in to Rackham. This page describes how to do so using a terminal and a password.

If you want to get rid of using a password every time, see login to the Rackham console environment with an SSH key.

","tags":["login","log in","Rackham","console","terminal","password","ssh","SSH"]},{"location":"getting_started/login_rackham_console_password/#procedure","title":"Procedure","text":"Prefer a video?

This procedure is also shown by this YouTube video.

","tags":["login","log in","Rackham","console","terminal","password","ssh","SSH"]},{"location":"getting_started/login_rackham_console_password/#1-use-ssh-to-log-in","title":"1. Use ssh to log in","text":"

From a terminal, use ssh to log in:

ssh -X [username]@rackham.uppmax.uu.se\n

[username] is your UPPMAX username, for example, sven, resulting in:

ssh -X sven@rackham.uppmax.uu.se\n

-X enables so-called X forwarding, which allows you to run programs that require light graphics, such as eog to display an image.

Can I log in without -X?

Yes!

If you do not need X forwarding to run programs that require light graphics, omitting the -X is just fine.

Why no -A?

On Rackham, one can use -A:

ssh -A username@rackham.uppmax.uu.se\n

this option is only useful when you want to log in to Rackham via the console using an SSH key. As we use passwords here (i.e. no SSH keys) to access Rackham, -A is unused, hence we simplify this documentation by omitting it.

","tags":["login","log in","Rackham","console","terminal","password","ssh","SSH"]},{"location":"getting_started/login_rackham_console_password/#2-type-your-uppmax-password","title":"2. Type your UPPMAX password","text":"

Type your UPPMAX password and press enter. You will see no asterisks to indicate how many characters you've typed in.

If you are outside the university networks you will be asked for your UPPMAX 2-factor authentication number.

","tags":["login","log in","Rackham","console","terminal","password","ssh","SSH"]},{"location":"getting_started/login_rackham_console_password/#3-you-are-in","title":"3. You are in","text":"

Enjoy! You are in! Or, to be precise, you are in your home folder on a Rackham login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

","tags":["login","log in","Rackham","console","terminal","password","ssh","SSH"]},{"location":"getting_started/login_rackham_console_ssh_key/","title":"Login to the Rackham console environment using SSH keys","text":"

There are multiple ways to log in to Rackham.

This page describes how to log in to Rackham using a terminal and an SSH key pair.

","tags":["login","log in","Rackham","console","terminal","SSH"]},{"location":"getting_started/login_rackham_console_ssh_key/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

When inside SUNET, one can access a Rackham console environment using SSH and SSH keys.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

This is considered a bit harder to set up, but one only needs to type one password to log in to Rackham. If you don't mind typing your UPPMAX password twice, an easier setup is to log in to the Rackham console environment with a password.

","tags":["login","log in","Rackham","console","terminal","SSH"]},{"location":"getting_started/login_rackham_console_ssh_key/#2-use-ssh-to-log-in","title":"2. Use ssh to log in","text":"

From a terminal, use ssh to log in:

ssh -AX [user]@rackham.uppmax.uu.se\n

For example:

ssh -AX sven@rackham.uppmax.uu.se\n
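This assumes an SSH key pair already exists and its public key is known to Rackham. A minimal sketch of that one-time setup, run on your local machine (ed25519 is one common key type; sven is a placeholder username):

ssh-keygen -t ed25519\nssh-copy-id sven@rackham.uppmax.uu.se\n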
","tags":["login","log in","Rackham","console","terminal","SSH"]},{"location":"getting_started/login_rackham_console_ssh_key/#3-type-your-uppmax-password","title":"3. Type your UPPMAX password","text":"

Type your UPPMAX password.

","tags":["login","log in","Rackham","console","terminal","SSH"]},{"location":"getting_started/login_rackham_console_ssh_key/#4-you-are-in","title":"4. You are in","text":"

Enjoy! You are in! To be precise, you are on a Rackham login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

In a Rackham console environment:

  • Text display is limited to 50 kbit/s. This means that if you create a lot of text output, you will have to wait some time before you get your prompt back.
  • Cut, copy and paste work as usual. Be careful to not copy-paste sensitive data!
","tags":["login","log in","Rackham","console","terminal","SSH"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/","title":"Log in to Rackham's remote desktop environment using a local ThinLinc client","text":"

There are multiple ways to log in to Rackham. This page describes how to log in to its remote desktop environment via a local ThinLinc client.

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#procedure","title":"Procedure","text":"Prefer a video?

This procedure is also shown by this YouTube video.

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#1-install-thinlinc","title":"1. Install ThinLinc","text":"

Install ThinLinc. For help, see the UPPMAX page on ThinLinc.

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#2-start-thinlinc","title":"2. Start ThinLinc","text":"

In the ThinLinc login dialog, set the server name to rackham-gui.uppmax.uu.se.

What does that look like?

The ThinLinc login dialog

Why not use https://www.rackham-gui.uppmax.uu.se?

Because that does not work :-)

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#3-forward-the-thinlinc-welcome-dialog","title":"3. Forward the ThinLinc Welcome dialog","text":"

On the ThinLinc 'Welcome' dialog, click 'Forward'


","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#4-select-a-thinlinc-profile","title":"4. Select a ThinLinc profile","text":"

On the ThinLinc 'Select profile' dialog, select a profile:

| Profile | Recommendation |
| --- | --- |
| GNOME | Recommended |
| KDE | Avoid |
| XFCE | Recommended |

Avoid choosing KDE

Avoid choosing the KDE desktop, as it gives problems when running interactive sessions.

Instead, we recommend GNOME or XFCE.

What does that look like?

Here you are asked to pick a remote desktop flavor, with Xfce as the default. Pick any, except KDE.

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#5-you-are-in","title":"5. You are in","text":"

You are in! Well done!

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.


","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_local_thinlinc_client/#usage","title":"Usage","text":"

For tips on how to work with this environment, see the UPPMAX ThinLinc page.

","tags":["login","log in","Rackham","remote desktop","local","ThinLinc","ThinLinc client"]},{"location":"getting_started/login_rackham_remote_desktop_website/","title":"Log in to Rackham's remote desktop via a webbrowser","text":"

Rackham's remote desktop environment via a web browser

There are multiple ways to log in to Rackham. This page describes how to log in to its remote desktop environment via a web browser.

","tags":["login","log in","Rackham","remote desktop","website","URL"]},{"location":"getting_started/login_rackham_remote_desktop_website/#procedure","title":"Procedure","text":"Prefer a video?

This procedure is also shown by this YouTube video.

This is a procedure with one step. Most work will be to fulfill all Rackham usage prerequisites.

","tags":["login","log in","Rackham","remote desktop","website","URL"]},{"location":"getting_started/login_rackham_remote_desktop_website/#1-go-to-httpsrackham-guiuppmaxuuse","title":"1. Go to https://rackham-gui.uppmax.uu.se","text":"

In a web browser, go to https://rackham-gui.uppmax.uu.se.

  • In the first field, fill in your UPPMAX username, e.g. sven
  • In the second field, fill in your UPPMAX password (e.g. password) and your UPPMAX 2FA code (e.g. 123456) together, without a space (e.g. password123456)

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

","tags":["login","log in","Rackham","remote desktop","website","URL"]},{"location":"getting_started/login_rackham_remote_desktop_website/#usage","title":"Usage","text":"

For tips on how to work with this environment, see the UPPMAX ThinLinc page (as that software is used to do the heavy lifting for that website).

","tags":["login","log in","Rackham","remote desktop","website","URL"]},{"location":"getting_started/login_snowy/","title":"Log in to Snowy","text":"

There are multiple UPPMAX clusters one can log in to. Here we describe how to log in to Snowy.

One needs to be allowed to use Snowy. These prerequisites describe what is needed before one can use Snowy.

To make Snowy do a calculation, one needs to log in to a Rackham login node, which is described here.

After login, you will be on a login node.

How to behave on a login node

On a login node, one can and should do simple things only: it is a resource shared with all other users on that node.

If you need to do more intense calculations, use the Slurm job scheduler.

If you need to do more intense calculations interactively, use an interactive node.

After logging in, one can

  • Start a batch job using Snowy resources (see the sketch below the diagram)
  • Start an interactive job (see the sketch below the diagram)
graph LR\n\n  subgraph \"Snowy\"\n    snowy_calculation_node[Calculation nodes]\n  end\n\n\n  subgraph \"Rackham\"\n    login_node[Login node]\n  end\n\n  login_node --> |interactive or sbatch| snowy_calculation_node
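A minimal sketch of both options, run from a Rackham login node (the project name naiss2024-22-49 and the script my_job.sh are placeholders; Slurm's -M flag sends the job to the Snowy cluster):

sbatch -M snowy -A naiss2024-22-49 -p core -n 1 -t 1:00:00 my_job.sh\n

or, interactively:

interactive -M snowy -A naiss2024-22-49 -n 1 -t 1:00:00\n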
","tags":["login","log in","Snowy"]},{"location":"getting_started/project/","title":"UPPMAX project","text":"

To use UPPMAX resources, one needs:

  • an active research project
  • an UPPMAX user account

This page is about UPPMAX projects:

  • View your existing UPPMAX projects
  • Type of UPPMAX projects
  • Apply to an UPPMAX project
","tags":["project","UPPMAX"]},{"location":"getting_started/project/#view-your-uppmax-projects","title":"View your UPPMAX projects","text":"

SUPR (the 'Swedish User and Project Repository') is the website that allows one to request access to Swedish computational resources and to get an overview of the requested resources.

What does the SUPR website look like?

First SUPR page

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

After logging in, the SUPR website will show all projects you are a member of, under the 'Projects' tab.

What does the 'Projects' tab of the SUPR website look like?

An example overview of SUPR projects

","tags":["project","UPPMAX"]},{"location":"getting_started/project/#how-to-convert-my-project-name-to-an-account-name-for-the-job-scheduler","title":"How to convert my project name to an account name for the job scheduler?","text":"

Here is a simple conversion table:

| Project name | Account name for the job scheduler |
| --- | --- |
| NAISS 2024/22-49 | naiss2024-22-49 |
| sens2017625 | sens2017625 |

Else, on an UPPMAX cluster do:

cd /proj\nls\n

and look for a project folder that resembles the name of your project. The name of that folder is the name of your account.

What does that look like?

Here is part of the output:

naiss2023-22-57           naiss2024-22-227  snic2015-10-19           snic2018-8-136  snic2020-15-16   snic2021-22-513   snic2022-22-1164  snic2022-5-333    uppstore2019112\nnaiss2023-22-570          naiss2024-22-24   snic2015-10-25           snic2018-8-139  snic2020-15-161  snic2021-22-517   snic2022-22-117   snic2022-5-334    uppstore2019113\nnaiss2023-22-574          naiss2024-22-244  snic2015-10-8            snic2018-8-14   snic2020-15-162  snic2021-22-521   snic2022-22-1172  snic2022-5-339    uppstore2019114\nnaiss2023-22-577          naiss2024-22-247  snic2015-1-142           snic2018-8-141  snic2020-15-163  snic2021-22-522   snic2022-22-1173  snic2022-5-34     uppstore2019115\nnaiss2023-22-578          naiss2024-22-253  snic2015-1-164           snic2018-8-143  snic2020-15-164  snic2021-22-525   snic2022-22-1178  snic2022-5-343    uppstore2019117\nnaiss2023-22-58           naiss2024-22-257  snic2015-1-176           snic2018-8-144  snic2020-15-165  snic2021-22-526   snic2022-22-1179  snic2022-5-364    uppstore2019118\nnaiss2023-22-580          naiss2024-22-26   snic2015-1-177           snic2018-8-145  snic2020-15-17   snic2021-22-529   snic2022-22-1180  snic2022-5-373    uppstore2019119\nnaiss2023-22-582          naiss2024-22-270  snic2015-1-201           snic2018-8-146  snic2020-15-172  snic2021-22-530   snic2022-22-1181  snic2022-5-376    uppstore2019120\nnaiss2023-22-583          naiss2024-22-275  snic2015-1-204           snic2018-8-147  snic2020-15-173  snic2021-22-535   snic2022-22-1184  snic2022-5-392    uppstore2019121\nnaiss2023-22-586          naiss2024-22-281  snic2015-1-228           snic2018-8-148  snic2020-15-175  snic2021-22-537   snic2022-22-1186  snic2022-5-403    uppstore2019123\nnaiss2023-22-590          naiss2024-22-282  snic2015-1-242           snic2018-8-149  snic2020-15-177  snic2021-22-538   snic2022-22-1194  snic2022-5-407    uppstore2021-23-134\nnaiss2023-22-598          naiss2024-22-295  snic2015-1-259           snic2018-8-15   snic2020-15-178  snic2021-22-541   snic2022-22-1195  snic2022-5-408    uu_1dl550_2021\nnaiss2023-22-600          naiss2024-22-299  snic2015-1-268           snic2018-8-150  snic2020-15-179  snic2021-22-544   snic2022-22-1197  snic2022-5-415    uucompbiochem\nnaiss2023-22-608          naiss2024-22-3    snic2015-1-281           snic2018-8-151  snic2020-15-18   snic2021-22-546   snic2022-22-1198  snic2022-5-42     var_inf_sim_alex\nnaiss2023-22-62           naiss2024-22-301  snic2015-1-315           snic2018-8-152  snic2020-15-182  snic2021-22-547   snic2022-22-12    snic2022-5-423    viher_snic2022\nnaiss2023-22-620          naiss2024-22-303  snic2015-1-33            snic2018-8-153  snic2020-15-183  snic2021-22-550   snic2022-22-1200  snic2022-5-428    viscaria_pilot\nnaiss2023-22-621          naiss2024-22-305  snic2015-1-345           snic2018-8-154  snic2020-15-185  snic2021-22-554   snic2022-22-1207  snic2022-5-432    vrognas\nnaiss2023-22-623          naiss2024-22-307  snic2015-1-364           snic2018-8-155  snic2020-15-186  snic2021-22-555   snic2022-22-1208  snic2022-5-443    wamr\nnaiss2023-22-624          naiss2024-22-308  snic2015-1-37            snic2018-8-156  snic2020-15-188  snic2021-22-557   snic2022-22-121   snic2022-5-451    wave_energy_parks\nnaiss2023-22-627          naiss2024-22-310  snic2015-1-398           snic2018-8-157  snic2020-15-189  snic2021-22-559   snic2022-22-1214  snic2022-5-454    wheatrnaseq\nnaiss2023-22-632          naiss2024-22-319  snic2015-1-399           snic2018-8-158  snic2020-15-19   
snic2021-22-56    snic2022-22-1216  snic2022-5-461    wheatrnaseq2\nnaiss2023-22-633          naiss2024-22-322  snic2015-1-410           snic2018-8-159  snic2020-15-190  snic2021-22-562   snic2022-22-1224  snic2022-5-466    wiosym\nnaiss2023-22-634          naiss2024-22-324  snic2015-1-451           snic2018-8-16   snic2020-15-191  snic2021-22-563   snic2022-22-1227  snic2022-5-484    xfooli\nnaiss2023-22-64           naiss2024-22-326  snic2015-1-466           snic2018-8-161  snic2020-15-192  snic2021-22-564   snic2022-22-1228  snic2022-5-503    yeast1000storage\nnaiss2023-22-640          naiss2024-22-330  snic2015-1-475           snic2018-8-162  snic2020-15-193  snic2021-22-565   snic2022-22-123   snic2022-5-506    yeast-genomics\nnaiss2023-22-648          naiss2024-22-332  snic2015-1-52            snic2018-8-163  snic2020-15-195  snic2021-22-569   snic2022-22-1231  snic2022-5-51     yeast_hybrid_barcode\nnaiss2023-22-652          naiss2024-22-339  snic2015-16-12           snic2018-8-164  snic2020-15-196  snic2021-22-570   snic2022-22-1233  snic2022-5-52     zengkun\nnaiss2023-22-654          naiss2024-22-341  snic2015-16-27           snic2018-8-165  snic2020-15-197  snic2021-22-571   snic2022-22-1234  snic2022-5-528    zinc22\nnaiss2023-22-655          naiss2024-22-345  snic2015-16-34           snic2018-8-166  snic2020-15-198  snic2021-22-572   snic2022-22-1236  snic2022-5-530\nnaiss2023-22-658          naiss2024-22-347  snic2015-1-72            snic2018-8-167  snic2020-15-199  snic2021-22-573   snic2022-22-1237  snic2022-5-544\nnaiss2023-22-659          naiss2024-22-351  snic2015-1-92            snic2018-8-168  snic2020-15-2    snic2021-22-574   snic2022-22-1238  snic2022-5-548\nnaiss2023-22-660          naiss2024-22-354  snic2015-6-101           snic2018-8-169  snic2020-15-20   snic2021-22-579   snic2022-22-1247  snic2022-5-552\nnaiss2023-22-662          naiss2024-22-358  snic2015-6-102           snic2018-8-170  snic2020-15-201  snic2021-22-580   snic2022-22-125   snic2022-5-555\nnaiss2023-22-665          naiss2024-22-362  snic2015-6-104           snic2018-8-171  snic2020-15-202  snic2021-22-582   snic2022-22-1250  snic2022-5-560\nnaiss2023-22-667          naiss2024-22-363  snic2015-6-107           snic2018-8-173  snic2020-15-203  snic2021-22-583   snic2022-22-1253  snic2022-5-568\nnaiss2023-22-67           naiss2024-22-375  snic2015-6-109           snic2018-8-175  snic2020-15-204  snic2021-22-584   snic2022-22-1254  snic2022-5-582\n
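Based on the conversion table above, a minimal shell sketch of the conversion rule (an assumption inferred from the two examples: remove spaces, replace '/' with '-', and lowercase):

echo 'NAISS 2024/22-49' | tr -d ' ' | tr '/' '-' | tr '[:upper:]' '[:lower:]'\n

which prints naiss2024-22-49.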
","tags":["project","UPPMAX"]},{"location":"getting_started/project/#type-of-uppmax-projects","title":"Type of UPPMAX projects","text":"
  • NAISS projects
  • UPPMAX projects
  • NGI Delivery projects
  • Course projects
","tags":["project","UPPMAX"]},{"location":"getting_started/project/#apply-to-an-uppmax-project","title":"Apply to an UPPMAX project","text":"

See the UPPMAX page on 'Project application' here.

","tags":["project","UPPMAX"]},{"location":"getting_started/project_apply/","title":"Project application","text":"

To use UPPMAX resources, one needs:

  • an active research project
  • an UPPMAX user account

Your user account is a personal log-in to our systems. Computer resources like CPU-hours and disk storage are allocated to projects.

The workflow is like this:

- Register in SUPR\n- Accept SUPR user agreement\n- Become a member of a project\n- Apply for an account at UPPMAX (or other resources)\n

Warning

Note that you can only get a user account on a resource if you belong to a project with allocations there!

","tags":["project","apply","application"]},{"location":"getting_started/project_apply/#supr-account","title":"SUPR account","text":"Get a SUPR account
  • You should visit the national project management platform SUPR and register there.
    • Make sure that you don't already have an account at SUPR. You must not have more than one account in SUPR.
  • All steps here.
Accept SUPR user agreement
  • You must accept the user agreement in SUPR, either online or in paper form.
  • Details here.
","tags":["project","apply","application"]},{"location":"getting_started/project_apply/#applying-for-an-uppmax-project-pi","title":"Applying for an UPPMAX project (PI)","text":"

If you are a PI: apply for a project in SUPR.

  • Apply for a Bianca project, i.e. a project using sensitive data
  • Apply for a Pelle project, i.e. a project for the new Uppsala-local general-purpose cluster
  • Apply for a Swedish Science Cloud project, i.e. a project that provides an online service, e.g. a website
  • Apply for a SIMPLER project, i.e. a project for the Swedish Infrastructure for Medical Population-based Life-course and Environmental Research
  • Other applications
","tags":["project","apply","application"]},{"location":"getting_started/project_apply/#become-a-member-of-a-project","title":"Become a member of a project","text":"

If you are not a PI: apply for membership in a project you want to join in SUPR, then wait for the PI to accept your application. Alternatively, the PI can add you directly.

  • join an existing project.
I just got an UPPMAX project, yet I cannot log in to UPPMAX?

It usually takes minutes, up to a few hours, before the changes propagate from SUPR to UPPMAX.

If you still cannot log in after one night, please contact support.

","tags":["project","apply","application"]},{"location":"getting_started/project_apply/#apply-for-an-account-at-uppmax","title":"Apply for an account at UPPMAX","text":"

If you don't already have an account at UPPMAX, you are now ready to apply for one:

  • Apply for an UPPMAX user account
","tags":["project","apply","application"]},{"location":"getting_started/project_apply_bianca/","title":"Project application for Bianca","text":"

To use an UPPMAX cluster, one needs to apply to a project. This page describes how to apply to a Bianca project.

","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#procedure","title":"Procedure","text":"","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#1-go-to-httpssuprnaissse","title":"1. Go to https://supr.naiss.se/","text":"

Example SUPR NAISS main page

","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#2-click-on-rounds","title":"2. Click on 'Rounds'","text":"

On the main page, click on 'Rounds'


","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#3-click-on-go-to-naiss-sens","title":"3. Click on 'Go to NAISS SENS'","text":"

In the 'Rounds' menu, click on 'Go to NAISS SENS'


","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#4-click-on-go-to-naiss-sens-small-for-the-current-year","title":"4. Click on 'Go to NAISS SENS Small' for the current year","text":"

In the 'NAISS SENS Rounds' menu, click on 'Go to NAISS SENS Small' for the current year:


","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#5-click-create-new-proposal-for-naiss-sens-small-for-the-current-year","title":"5. Click 'Create New Proposal for NAISS SENS Small' for the current year","text":"

In the 'Open for Proposals' screen, click 'Create New Proposal for NAISS SENS Small' for the current year


","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_bianca/#6-add-a-project-title-and-click-create-new-proposal","title":"6. Add a project title and click 'Create new proposal'","text":"

In the 'Create New Proposal for NAISS SENS Small 2024', add a project title and click 'Create new proposal'


After this, the procedure is straightforward.

Resource available for a NAISS SENS Small project

","tags":["project","apply","application","Bianca"]},{"location":"getting_started/project_apply_pelle/","title":"Project application for Pelle","text":"

To use an UPPMAX cluster, one needs to apply to a project. This page describes how to apply to a Pelle project.

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#procedure","title":"Procedure","text":"Prefer a video?

See the YouTube video 'Apply for an UPPMAX Pelle project'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#1-go-to-httpssuprnaissse","title":"1. Go to https://supr.naiss.se/","text":"How does that look like?

Example SUPR NAISS main page

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#2-click-on-rounds","title":"2. Click on 'Rounds'","text":"

On the main page, click on 'Rounds'

What does that look like?

On the main page, click on 'Rounds'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#3-click-on-go-to-compute-rounds","title":"3. Click on 'Go to Compute Rounds'","text":"

In the 'Rounds' menu, click on 'Go to Compute Rounds'

What does that look like?

In the 'Rounds' menu, click on 'Go to Compute Rounds'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#4-click-on-go-to-centre-local-compute","title":"4. Click on 'Go to Centre Local Compute'","text":"

In the 'Compute Rounds' menu, click on 'Go to Centre Local Compute'

What does that look like?

In the 'Compute Rounds' menu, click on 'Go to Centre Local Compute'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#5-click-on-go-to-uppmax-local","title":"5. Click on 'Go to UPPMAX Local'","text":"

In the 'Centre Local Compute Rounds' menu, click on 'Go to UPPMAX Local'

What does that look like?

In the 'Centre Local Compute Rounds' menu, click on 'Go to UPPMAX Local'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#6-click-on-create-new-proposal","title":"6. Click on 'Create new proposal'","text":"

In the 'UPPMAX Local' menu, click on 'Create new proposal'

What does that look like?

In the 'UPPMAX Local' menu, click on 'Create new proposal'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#7-fill-in-a-title-and-click-on-create-new-proposal","title":"7. Fill in a title and click on 'Create new proposal'","text":"

In the 'Create new proposal for UPPMAX local' menu, fill in a title and click on 'Create new proposal'

What does that look like?

In the 'Create new proposal for UPPMAX local' menu, fill in a title and click on 'Create new proposal'

You have just created an UPPMAX local compute project!

What does that look like?

An UPPMAX local compute project

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#8-scroll-down-and-add-pelle","title":"8. Scroll down and add Pelle","text":"

In your UPPMAX local compute project, scroll down to 'Resources' and add Pelle.

What does that look like?

In your UPPMAX local compute project, scroll down to 'Resources' and add Pelle

Click on 'Add resource to proposal' to add Pelle as a resource.

What does that look like?

In your UPPMAX local compute project, click 'Add resource to proposal'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#9-click-on-create-new-proposal","title":"9. Click on 'Create new proposal'","text":"

In the 'Add resource Pelle' menu, set the number of core hours and click 'Add resource'.

What does that look like?

In the 'Add resource Pelle' menu, set the number of core hours and click 'Add resource'

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_pelle/#10-done","title":"10. Done","text":"

Now, Pelle is added to your UPPMAX local compute project. Well done!

What does that look like?

In your UPPMAX local compute project, Pelle is added

","tags":["project","apply","application","Pelle"]},{"location":"getting_started/project_apply_scc/","title":"Project application for SCC","text":"

To use an UPPMAX cluster, one needs to apply to a project. This page describes how to apply to an SCC project.

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#procedure","title":"Procedure","text":"Prefer a video?

...

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#1-go-to-httpssuprnaissse","title":"1. Go to https://supr.naiss.se/","text":"How does that look like?

Example SUPR NAISS main page

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#2-click-on-rounds","title":"2. Click on 'Rounds'","text":"

On the main page, click on 'Rounds'

What does that look like?

On the main page, click on 'Rounds'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#3-click-on-go-to-swedish-science-cloud","title":"3. Click on 'Go to Swedish Science Cloud'","text":"

In the 'Rounds' menu, click on 'Go to Swedish Science Cloud'

What does that look like?

In the 'Rounds' menu, click on 'Go to Swedish Science Cloud'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#4-click-on-go-to-swedish-science-cloud","title":"4. Click on 'Go to Swedish Science Cloud'","text":"

In the 'Cloud resource' menu, click on 'Go to NAISS Small Compute 2025'.

What does that look like?

In the 'Cloud resource' menu, click on 'Go to NAISS Small Compute 2025'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#5-click-on-create-new-proposal-for-naiss-small-compute","title":"5. Click on 'Create new proposal for NAISS Small Compute'","text":"

In the 'Small Compute Rounds' menu, click on 'Create new proposal for NAISS Small Compute'.

What does that look like?

In the 'Small Compute Rounds' menu, click on 'Create new proposal for NAISS Small Compute'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#6-add-a-project-title-and-click-on-create-new-proposal","title":"6. Add a project title and click on 'Create new proposal'","text":"

In the 'Create new proposal' menu, add a project title and click on 'Create new proposal'

What does that look like?

In the 'Create new proposal' menu, add a project title and click on 'Create new proposal'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#7-scroll-down-to-resources","title":"7. Scroll down to 'Resources'","text":"

In this NAISS project proposal page, scroll down to 'Resources'.

What does that look like?

In this NAISS project proposal page, scroll down to 'Resources'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#8-select-cloud-scc","title":"8. Select 'Cloud @ SCC'","text":"

In the 'Resources' dropdown, select 'Cloud @ SCC'.

What does that look like?

In the 'Resources' dropdown, select 'Cloud @ SCC'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#9-set-the-amount-of-coins-and-click-add-resource","title":"9. Set the amount of coins and click 'Add Resource'","text":"

At the 'Add resource Cloud' page, set the amount of coins and click 'Add Resource'.

What does that look like?

At the 'Add resource Cloud' page, set the amount of coins and click 'Add Resource'.

The resource is now added to your project.

What does that look like?

'Resource Cloud added to proposal'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_scc/#10-click-submit-proposal","title":"10. Click 'Submit proposal'","text":"

In this NAISS project proposal page, after all other details are filled in, scroll down and click on 'Submit proposal'

What does that look like?

In this NAISS project proposal page, scroll down and click on 'Submit proposal'

","tags":["project","apply","application","SCC","Swedish Science Cloud"]},{"location":"getting_started/project_apply_simpler/","title":"SIMPLER project application","text":"

SIMPLER is an abbreviation

SIMPLER is an abbreviation of 'Swedish Infrastructure for Medical Population-based Life-course and Environmental Research'. It is not meant to indicate that this is easier.

To use an UPPMAX cluster, one needs to apply to a project. This page describes how to apply for a SIMPLER project.

","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#procedure","title":"Procedure","text":"Prefer a video?

View the YouTube video that shows this procedure

","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#1-go-to-httpssuprnaissse","title":"1. Go to https://supr.naiss.se/","text":"

Example SUPR NAISS main page

","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#2-click-on-rounds","title":"2. Click on 'Rounds'","text":"

On the main page, click on 'Rounds'


","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#3-click-on-go-to-naiss-sens","title":"3. Click on 'Go to NAISS SENS'","text":"

In the 'Rounds' menu, click on 'Go to NAISS SENS'


","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#4-click-on-go-to-simpler-for-the-current-year","title":"4. Click on 'Go to SIMPLER' for the current year","text":"

In the 'Rounds' menu, click on 'Go to SIMPLER' for the current year.


","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#5-click-create-new-proposal-for-simpler-for-the-current-year","title":"5. Click 'Create New Proposal for SIMPLER' for the current year","text":"

In the 'Open for Proposals' screen, click 'Create New Proposal for SIMPLER' for the current year


","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/project_apply_simpler/#6-add-a-project-title-and-click-create-new-proposal","title":"6. Add a project title and click 'Create new proposal'","text":"

In the 'Create New Proposal for SIMPLER 2024', add a project title and click 'Create new proposal'


After this, the procedure is straightforward.

","tags":["project","apply","application","SIMPLER"]},{"location":"getting_started/rackham_usage_prerequisites/","title":"Prerequisites for using Rackham","text":"

To be allowed to log in to Rackham, one needs all of these:

  • An active research project
  • An UPPMAX account
  • An UPPMAX password
  • (for the Rackham remote desktop website) An UPPMAX 2FA

These prerequisites are discussed in detail below.

"},{"location":"getting_started/rackham_usage_prerequisites/#an-active-research-project","title":"An active research project","text":"

One prerequisite for using Rackham is that you need to be a member of an active SNIC or SIMPLER research project (these can have many names, such as uppmax[number], snic[number] or naiss[number], where [number] represents a number, for example uppmax2021-2-1, snic2022-6-230 or naiss2023-6-382).

Forgot your Rackham projects?

How to see your research projects is described at research projects.

Spoiler: go to https://supr.naiss.se

SUPR (the 'Swedish User and Project Repository') is the website that allows one to request access to Rackham and to get an overview of the requested resources.

What does the SUPR website look like?

First SUPR page

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

After logging in, the SUPR website will show all projects you are a member of, under the 'Projects' tab.

What does the 'Projects' tab of the SUPR website look like?

Example overview of SUPR projects

To see if a project has access to Rackham, click on the project and scroll to the 'Resources' section. In the 'Compute' subsection, there is a table. Under 'Resource' it should state 'Rackham @ UPPMAX'.

What does the 'Resources' page of an example project look like?

The 'Resources' page of an example project. This project has two compute resources and two storage resources.

Note that the 'Accounts' tab can be useful to verify your username.

How does the 'Accounts' tab help me find my username?

An example of a SUPR 'Accounts' tab. The example user has username sven-sens2023598, which means his/her UPPMAX username is sven

You can become a member of an active research project by:

  • request membership to an existing project in SUPR
  • create a project. See the UPPMAX page on how to submit a project application here
"},{"location":"getting_started/rackham_usage_prerequisites/#an-uppmax-user-account","title":"An UPPMAX user account","text":"

Another prerequisite for using Rackham is that you must have a personal UPPMAX user account.

"},{"location":"getting_started/rackham_usage_prerequisites/#an-uppmax-password","title":"An UPPMAX password","text":"

Another prerequisite for using Rackham is that you need to know your UPPMAX password. See how to reset and set your UPPMAX password to do so.

"},{"location":"getting_started/rackham_usage_prerequisites/#an-uppmax-2fa","title":"An UPPMAX 2FA","text":"

Another prerequisite for using Rackham (but only for the Rackham remote desktop website) is to have an UPPMAX 2FA. See how to get an UPPMAX 2FA

"},{"location":"getting_started/reset_uppmax_password/","title":"Reset your UPPMAX password","text":"Prefer a video?

See the YouTube video 'How to reset your UPPMAX password'

","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/reset_uppmax_password/#procedure","title":"Procedure","text":"","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/reset_uppmax_password/#1-go-to-httpssuprintegrationuppmaxuusegetpasswd","title":"1. Go to https://suprintegration.uppmax.uu.se/getpasswd","text":"

Go to https://suprintegration.uppmax.uu.se/getpasswd. After authenticating yourself, your password is reset immediately.

You will be sent an email within around 5 minutes.

","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/reset_uppmax_password/#2-open-email","title":"2. Open email","text":"

Open the email and click on the link it suggests.

What does that email look like?

Your email will look similar to this:

Greetings,\n\na new password has been generated for your account at UPPMAX.\n\nYou can fetch it by visiting the link below.\nNote though that the link is only valid for 7 days and one (1) visit.\n\nYou can retrieve the password at the following link:\n\nhttps://content.uppmax.uu.se/get-password2.php?sum=hvs.CAESIGOczS0[more_letters]\n\nIf the password has expired, you can request a new password from our homepage\nhttps://www.uppmax.uu.se and the link \"Lost your password?\".\n\nNote that if you requested a new password because your account was locked,\nit may take some additional time (up to an hour) before that change is\nreflected everywhere.\n\nIf you are unsure about what your user name is, this information is available\nin SUPR (https://supr.snic.se/) under Accounts.\n\nFor general information about how to login, change your password and\nso on, please see our getting started guide at\n\nhttp://www.uppmax.uu.se/support/user-guides/guide--first-login-to-uppmax/\n\nregards, UPPMAX Support\n\n\nVARNING: Klicka inte p\u00e5 l\u00e4nkar och \u00f6ppna inte bilagor om du inte k\u00e4nner igen avs\u00e4ndaren och vet att inneh\u00e5llet \u00e4r s\u00e4kert.\nCAUTION: Do not click on links or open attachments unless you recognise the sender and know the content is safe.\n

In this example, https://content.uppmax.uu.se/get-password2.php?sum=hvs.CAESIGOczS0[more_letters] is the link you should click

This will take you to a page with your new password.

","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/reset_uppmax_password/#3-log-in-with-your-new-password","title":"3. Log in with your new password","text":"

At the page with your new password, you use that password to log in.

","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/reset_uppmax_password/#4-optional-change-your-password","title":"4. (optional) Change your password","text":"

If you want to change your password, see How to change your UPPMAX password.

","tags":["UPPMAX","password","reset","set","passwd"]},{"location":"getting_started/setup_vpn/","title":"Setup a VPN","text":"

Some UPPMAX clusters require you to have an IP address inside of SUNET. A virtual private network (VPN) gives you such an address: it uses the Swedish university networks to connect to the UPPMAX clusters.

To be able to use a VPN to get inside of SUNET:

  • For Uppsala University:
    • Mac and Windows users: go to this page
    • Linux users: go to this page
  • For Lund University: go to this page
  • For other Swedish universities, search their websites to get the required VPN credentials.
Where do I go if I am no longer affiliated with a Swedish university?

In this case, one cannot use a VPN. What to use instead is yet unknown: please contact support.

Want a video to see how the UU VPN is used?
  • Use the UU VPN with 2FA
  • Use the UU VPN (yet without 2FA) to access the Bianca remote desktop website
","tags":["setup","set up","VPN","Windows","Mac"]},{"location":"getting_started/setup_vpn_uu_linux/","title":"Setup a VPN from Uppsala University for Linux","text":"

How to set up a VPN differs between universities and differs between operating systems. This page describes how to set up a VPN from Uppsala University for Linux.

","tags":["setup","set up","VPN","Linux","UU","Uppsala"]},{"location":"getting_started/setup_vpn_uu_linux/#procedure","title":"Procedure","text":"

Here is the procedure, as suggested by UIT:

","tags":["setup","set up","VPN","Linux","UU","Uppsala"]},{"location":"getting_started/snowy_usage_prerequisites/","title":"Prerequisites for using Snowy","text":"

To be allowed to log in to Snowy, one needs all of these:

  • An active research project
  • An UPPMAX account
  • An UPPMAX password

These prerequisites are discussed in detail below.

"},{"location":"getting_started/snowy_usage_prerequisites/#an-active-research-project","title":"An active research project","text":"

One prerequisite for using Snowy is that you need to be a member of an active SNIC or SIMPLER research project (these can have many names, such as uppmax[number], snic[number] or naiss[number], where [number] represents a number, for example uppmax2021-2-1, snic2022-6-230 or naiss2023-6-382).

Forgot your Snowy projects?

How to see your research projects is described at research projects.

Spoiler: go to https://supr.naiss.se

SUPR (the 'Swedish User and Project Repository') is the website that allows one to request access to Snowy and to get an overview of the requested resources.

What does the SUPR website look like?

First SUPR page

SUPR 2FA login. Use the SUPR 2FA (i.e. not UPPMAX)

After logging in, the SUPR website will show all projects you are a member of, under the 'Projects' tab.

What does the 'Projects' tab of the SUPR website look like?

Example overview of SUPR projects

To see if a project has access to Snowy, click on the project and scroll to the 'Resources' section. In the 'Compute' subsection, there is a table. Under 'Resource' it should state 'Snowy @ UPPMAX'.

What does the 'Resources' page of an example project look like?

The 'Resources' page of an example project. This project has two compute resources and two storage resources. A Snowy project would show the word 'Snowy' somewhere, so this is not a Snowy project.

Note that the 'Accounts' tab can be useful to verify your username.

How does the 'Accounts' tab help me find my username?

An example of a SUPR 'Accounts' tab. The example user has username sven-sens2023598, which means his/her UPPMAX username is sven

You can become a member of an active research project by:

  • request membership to an existing project in SUPR
  • create a project. See the UPPMAX page on how to submit a project application here
"},{"location":"getting_started/snowy_usage_prerequisites/#an-uppmax-user-account","title":"An UPPMAX user account","text":"

Another prerequisite for using Snowy is that you must have a personal UPPMAX user account.

"},{"location":"getting_started/snowy_usage_prerequisites/#an-uppmax-password","title":"An UPPMAX password","text":"

Another prerequisite for using Snowy is that you need to know your UPPMAX password. If you change it, it may take up to an hour before changes are reflected in Snowy.

For advice on handling sensitive personal data correctly on Snowy, see our FAQ page.

"},{"location":"getting_started/storage/","title":"Data storage","text":""},{"location":"getting_started/storage/#links","title":"Links","text":"
  • UU guide on data storage https://www.uu.se/en/staff/gateway/research/research-handbook/research-data/store-data-and-cooperate (broken link)
"},{"location":"getting_started/supr/","title":"SUPR","text":"

SUPR ('Swedish User and Project Repository') is a website at https://supr.naiss.se/ that manages HPC accounts and projects.

","tags":["SUPR"]},{"location":"getting_started/supr/#apply-for-an-uppmax-project","title":"Apply for an UPPMAX project","text":"

See the UPPMAX page on how to apply for an UPPMAX project.

","tags":["SUPR"]},{"location":"getting_started/supr/#setting-up-an-uppmax-2fa-token","title":"Setting up an UPPMAX 2FA token","text":"

Go to https://suprintegration.uppmax.uu.se/bootstrapotp/ to initiate the token creation. This should take you to a landing page with some initial information and let you know that you will be sent to SUPR to log in.


Once you click \"Continue\", you'll be sent to SUPR, where you should log in. Once you've done so, SUPR will let you know that you will be sent back, and show the identity you are logged in with.

For staff only

SUPR API documentation, requires the same certificate as RT

","tags":["SUPR"]},{"location":"getting_started/supr_register/","title":"Register at SUPR","text":"
  • Go to https://supr.naiss.se/person/register/
  • If you already have an account, you must use that account; otherwise, you can register with or without SWAMID by clicking on the appropriate button.
  • If you clicked on register via SWAMID, you will have to choose the university that you belong to. Here, as an example, Uppsala University is chosen:
  • Click on the university, then choose if you want to log in via this SWAMID once
  • or if your browser should remember your choice and use it every time you visit SUPR
  • Here again, as an example, is Uppsala University:
  • You can now check the information your university has sent to SUPR and accept it to create a SUPR account.
  • If you instead click on Register without SWAMID, you will have to fill in this:
  • Fill in the form and click the button to create your SUPR account.
  • You have to confirm your account by answering an email sent to the address you registered.
"},{"location":"getting_started/supr_register/#accept-the-user-agreement","title":"Accept the User Agreement","text":"
  • After logging in to your SUPR account, you must accept the user agreement. Click on Handle User Agreement
  • Depending on how you take care of the User Agreement, it may be approved automatically or it may require manual checking (for example if you choose to use the paper form). You will get an email from SUPR when it has been approved.
"},{"location":"getting_started/user_account/","title":"UPPMAX User Accounts","text":"

An UPPMAX user account is needed to use UPPMAX resources (together with an active UPPMAX research project) and allows you to log in to the UPPMAX clusters.

"},{"location":"getting_started/user_account/#apply-to-an-uppmax-user-account","title":"Apply to an UPPMAX user account","text":"

To apply for an UPPMAX user account, you (the user) and the PI of the project (the researcher in charge of the research project) must complete the following steps:

  • You should visit the national project management platform SUPR and register there. Instructions here.
  • Make sure that you don't already have an account at SUPR. You must not have more than one account in SUPR.
  • You must accept the user agreement in SUPR, either online or in paper form. Details here.
  • Become a member of a project:
    • If you are a PI: apply for a project in SUPR. Details here.
    • If you are not a PI: apply for membership in a project you want to join in SUPR, then wait for the PI to accept your application. Alternatively, the PI can add you directly. Join an existing project
  • You must apply for an account at UPPMAX in SUPR.
"},{"location":"getting_started/user_account/#apply-for-an-account-at-uppmax","title":"Apply for an account at UPPMAX","text":"
  • When the PI has accepted your membership application, you will receive an email.
  • Log in to SUPR and click on Accounts in the list to the left.

  • You will see the login accounts you already have at other NAISS centres, if you have any. Under the \"Possible Resource Account Requests\" heading you will find the UPPMAX resources you can apply for a login account on. Just use the \"Request Account on UPPMAX\" button.

  • You can then request a username. Then click Request Account

  • After applying, it might take up to 2 working days before you receive 2 emails with information on how to log in to UPPMAX.

  • If you have any questions, please contact us through the Support Form on how to access the UPPMAX resources.

Note

After the 4 steps are completed, your account will be created at UPPMAX within 2 working days and you will receive two emails with information.

"},{"location":"getting_started/why_2fa/","title":"Why is 2FA important?","text":"

By requiring a second factor, typically a cell phone or other physical device, it becomes much harder for an attacker to gain access to your account if they somehow have gotten hold of your password (which in this case is the first factor).

For security reasons, you will have to use a two-factor authentication system if you are a) connecting to UPPMAX from outside of Sweden, or b) connecting from a network within Sweden that does not support forward and reverse DNS lookups (due to a misconfiguration in your network; you can ask your internet service provider about this). More information about why can be found below.

","tags":["2FA","MFA","UPPMAX","Why"]},{"location":"getting_started/why_2fa/#from-outside-sweden","title":"From outside Sweden","text":"

If you try to connect directly to our resources from computers outside Sweden you will most likely be required to set up and use two factor authentication (you will be asked for a code from your second factor automatically if required).

Another alternative, if you need to access UPPMAX from outside Sweden, may be to use a Swedish VPN service. For example, if you're employed at Uppsala University, then you can connect using the university's VPN service.

","tags":["2FA","MFA","UPPMAX","Why"]},{"location":"getting_started/why_2fa/#from-within-sweden","title":"From within Sweden","text":"

If you are required to use two-factor authentication, and are connecting from a computer in Sweden, this is typically caused by your computer not having a proper DNS name, or by the forward and reverse name resolution not matching.

Why is that important?

See here

If this is the case, please contact your ISP and ask them to correct this.

","tags":["2FA","MFA","UPPMAX","Why"]},{"location":"getting_started/why_2fa/#note","title":"Note","text":"

You can check forward and reverse name resolution on this webpage:

  • http://www.whatismyip.com/reverse-dns-lookup

To see what address the other side thinks you come from (which will likely be what our systems see), services like

  • https://ifconfig.co/

can be helpful.

On Linux, you can also use these commands:

  • Forward resolution: host mycomputername.domain.tld. You have to replace mycomputername.domain.tld with your computer's actual name, for example:
host rackham2.uppmax.uu.se\n

will give:

rackham2.uppmax.uu.se has address 89.44.250.83\n
  • Reverse resolution: host my_ipnumber. You have to replace my_ipnumber with your computer's actual IP number, for example:
host 89.44.250.83\n

which should give something similar to:

89.44.250.83.in-addr.arpa domain name pointer tintin1.uppmax.uu.se\n
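A minimal sketch that combines these checks, verifying that forward and reverse resolution match (assumptions: curl, host and awk are available, and https://ifconfig.co is used to discover your public IP):

IP=$(curl -s https://ifconfig.co)\nNAME=$(host \"$IP\" | awk '/pointer/ {print $NF}')\nhost \"${NAME%.}\"  # forward resolution; should list the same IP\n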
","tags":["2FA","MFA","UPPMAX","Why"]},{"location":"hardware/overview/","title":"Hardware overview","text":"

This page describes the hardware architecture of the different compute clusters at UPPMAX as well as their storage systems.

UPPMAX is part of the National Academic Infrastructure for Supercomputing in Sweden (NAISS).

| Parameter | Rackham | Snowy | Bianca | UPPMAX Cloud |
| --- | --- | --- | --- | --- |
| Purpose | General-purpose | General-purpose | Sensitive data | IaaS |
| Reserved for | NAISS projects | Uppsala researchers and course projects | See Bianca | NAISS and local projects |
| Nodes (Intel) | 486+144 | 228 + 50 Nvidia T4 GPUs | See Bianca | 40 + 20 A2 and 4 T4 Nvidia GPUs |
| Cores per node | 20/16 | 16 | See Bianca | 16 |
| Memory per node | 128GB | 128GB | See Bianca | 128/256GB |
| Fat nodes | 256GB & 1TB | 256, 512 GB & 4TB | See Bianca | N/A |
| Local disk (scratch) | 2/3TB | 4TB | See Bianca | N/A |
| Network | InfiniBand FDR 56Gbit/s | InfiniBand FDR 40Gbit/s | See Bianca | 10GbE |
| Operating System | CentOS 7 | CentOS 7 | See Bianca | Linux cloud image |
| Login nodes | Yes | No (reached from Rackham) | See Bianca | N/A |
| \"Home\" storage | Domus | Domus | See Bianca | N/A |
| \"Project\" Storage | Crex, Lutra | Crex, Lutra | See Bianca | N/A |

The storage systems we have provide a total volume of about 20 PB, the equivalent of nearly 15 billion 3.5-inch floppy disks or 40,000 years of music encoded at 128 kbit/s.

","tags":["overview","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/","title":"Bianca hardware","text":"","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#technical-summary","title":"Technical Summary","text":"
  • 204 compute nodes with single or dual CPUs and one 4TB mechanical drive or 1TB SSD
  • Each CPU has 8 cores
  • 75 compute nodes, 256 GB memory each.
  • 15 compute nodes, 512 GB memory each
  • 10 compute nodes each equipped with 2xNVIDIA A100 (40GB) GPUs
  • Total number of CPU cores is 4800
  • Login nodes have 2vCPU each and 16GB memory
  • Dual 10 Gigabit Ethernet for all nodes
","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#parameters","title":"Parameters","text":"Parameter Bianca Purpose Sensitive data Reserved for NAISS-SENS projects Nodes (Intel) 272 + 4 nodes \u00e1 2 NVIDIA A100 GPUs Cores per node 16/64 Memory per node 128GB Fat nodes 256 & 512GB Local disk (scratch) 4TB Network Dual 10Gbit/s Operating System CentOS 7 Login nodes Yes (2 cores and 15 GB) \"Home\" storage Castor \"Project\" Storage Castor","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#cpu","title":"CPU","text":"","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#gpu","title":"GPU","text":"","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#network","title":"Network","text":"","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#storage","title":"Storage","text":"","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/bianca/#security","title":"Security","text":"

Since Bianca is designed to handle sensitive personal data security is a key aspect of the configuration. In order to ensure that the data is safe we have implemented a series of security measures including, but not limited to:

  • One virtualized cluster per project, no resources are shared between projects.
  • Separate storage volumes per project.
  • Detailed logging of file transfers in and out of the cluster.
  • Two factor authentication
  • No internet access inside the clusters.
  • Locked racks for the hardware
  • Destruction of broken hard drives

Uppsala University has decided on the following KRT classifications for Bianca:

  • 321 for project directories
  • 322 for home directories
","tags":["Bianca","hardware","specifications","specs"]},{"location":"hardware/clusters/rackham/","title":"Rackham hardware","text":"Nodes CPUs Cores Memory Scratch GPUs Name Comment 272 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) 20 (2 x 10) 128GB 3/4TB N/A r33-r304 . 32 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) 20 (2 x 10) 256GB 3/4TB N/A r1-r32 . 4 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) 20 (2 x 10) 1TB 3/4TB N/A ? . 4 2x Xeon E5-2630 V4 2.2 GHz (turbo 3.1 GHz) 20 (2 x 10) 256GB 3/4TB Nvidia Quatro K2200 rackham1-rackham3 Login nodes

The Rackham cluster was introduced in February 2017. Rackham is a NAISS resource and is estimated to be in production until the first of January 2023. The major features of Rackham and its storage system Crex are found below. For more technical data, please see the end of this article.

"},{"location":"hardware/clusters/rackham/#cpu","title":"CPU","text":""},{"location":"hardware/clusters/rackham/#network","title":"Network","text":""},{"location":"hardware/clusters/rackham/#storage","title":"Storage","text":""},{"location":"hardware/clusters/snowy/","title":"Snowy hardware","text":"Nodes CPUs Cores Memory Scratch GPUs Name Comment 122 2x Xeon E5-2660 2.2 GHz 16 (2 x 8) 128GB 3/4TB N/A s1-s12, s14-s40, s42-s120, s201-s204 . 49 2x Xeon E5-2660 2.2 GHz 16 (2 x 8) 128GB 3/4TB Tesla T4 s151-s163, s164-s200 . 15 2x Xeon E5-2660 2.2 GHz 16 (2 x 8) 512GB 3/4TB N/A s121-s129, s131, s133-s137 . 12 2x Xeon E5-2660 2.2 GHz 16 (2 x 8) 256GB 3/4TB N/A s139-s150 . 1 2x Xeon E5-2660 2.2 GHz 80 (10 x 8) 4TB 3/4TB N/A s229 . 1 2x Xeon E5-2660 2.2 GHz 16 (2 x 8) 256GB 3/4TB Tesla T4 s138 ."},{"location":"hardware/clusters/snowy/#cpu","title":"CPU","text":""},{"location":"hardware/clusters/snowy/#gpu","title":"GPU","text":""},{"location":"hardware/clusters/snowy/#network","title":"Network","text":""},{"location":"hardware/clusters/snowy/#storage","title":"Storage","text":""},{"location":"hardware/storage/castor/","title":"Castor","text":"

UPPMAX has many storage systems. This page describes the Castor storage system.

Castor is a custom-built storage system running GlusterFS, dedicated to Bianca. The system consists of 54 Huawei 5288 V3 servers; each server is equipped with 36 x 3TB SATA disks working as one logical volume (with redundancy), providing 109TB raw disk space per server. This gives about 5.7 PB raw disk space in total. Each storage server is connected to the network with 2 x 40 Gbit/s Ethernet links working as one aggregated link at 80 Gbit/s.

"},{"location":"hardware/storage/crex/","title":"Crex","text":"

UPPMAX has many storage systems. This page describes the Crex storage system.

Rackham and Snowy's storage system is a DDN (DataDirect Networks) EXAScaler filesystem based on the ES14KX platform. Crex uses 840 10TB NL-SAS drives and 24 300GB SAS drives for metadata storage. The total volume is 6 PB, with 1 PB reserved for SciLifeLab, 4.5 PB reserved for SNIC projects, and 0.5 PB for UPPMAX use. The filesystem is Lustre, a highly scalable filesystem common in HPC.

"},{"location":"hardware/storage/cygnus/","title":"Cygnus","text":"

UPPMAX has many storage systems. This page describes the Cygnus storage system.

Cygnus is a DDN Secure Lustre file system for Bianca.

"},{"location":"hardware/storage/domus/","title":"Domus","text":"

UPPMAX has many storage systems. This page describes the Domus storage system.

Domus hosts the home directories and some common system directories, e.g. the software catalogue. The system is a NetApp installation totalling 100 TB on 96 SAS 10K disks; it supports snapshots and has off-site backup.

"},{"location":"hardware/storage/lutra/","title":"Lutra","text":"

UPPMAX has many storage systems. This page describes the Lutra storage system.

Lutra is a custom-built storage system running GlusterFS. The system consists of 6 Huawei 5288 V5 servers with a total of 6x38 10TB SATA drives, for a raw capacity of 2.2 PB. The usable disk space is 1.8PB. Lutra is meant for \"offload\" or archive storage and is available to all users at a cost of (at the moment) 500 SEK/TB/year, with a commitment of four years and a minimum of 50TB. The design and filesystem choice make Lutra very scalable and cost-efficient while retaining moderate read/write performance. Lutra is connected to Rackham and Snowy for general availability.

If you are interested in this type of storage please contact support.

"},{"location":"hardware/storage/spirula/","title":"Spirula","text":"

UPPMAX has many storage systems. This page describes the Spirula storage system.

The DDLS-funded SciLifeLab FAIR Data Storage system, Spirula, runs Ceph-based object storage.

"},{"location":"hardware/storage/vulpes/","title":"Vulpes","text":"

UPPMAX has many storage systems. This page describes the Vulpes storage system.

Vulpes provides storage for Miarka.

"},{"location":"naiss/","title":"NAISS","text":"

Info

This page is here temporarily, until its content is moved to a better place.

NAISS, the National Academic Infrastructure for Supercomputing in Sweden, provides national resources for computing and storage.

"},{"location":"naiss/hpc2n/","title":"HPC2N","text":"

Info

This page is here temporarily, until its content is moved to a better place.

NAISS has many HPC centers. HPC2N is one of those.

  • HPC2N documentation main page
","tags":["HPC2n","Ume\u00e5","HPC","center","centre"]},{"location":"naiss/login_cosmos/","title":"Login COSMOS","text":"

Info

This page is here temporarily, until its content is moved to a better place.

"},{"location":"naiss/login_cosmos/#videos","title":"Videos","text":"
  • Using SSH, including password and Pocket Pass reset
  • Using a local ThinLinc client
"},{"location":"naiss/login_kebnekaise/","title":"Login Kebnekaise","text":"

Info

This page is here temporarily, until its content is moved to a better place.

"},{"location":"naiss/login_kebnekaise/#videos","title":"Videos","text":"
  • Using SSH and a password
  • Using a website to access the remote desktop environment
  • Using a local ThinLinc client to access the remote desktop environment
"},{"location":"naiss/login_tetralith/","title":"Login Kebnekaise","text":"

Info

This page is here temporarily, until its content is moved to a better place.

"},{"location":"naiss/login_tetralith/#videos","title":"Videos","text":"
  • Using SSH and a password
  • Using a local ThinLinc client to access the remote desktop environment
"},{"location":"naiss/lunarc/","title":"LUNARC","text":"

Info

This page is here temporarily, until its content is moved to a better place.

NAISS has many HPC centers. LUNARC is one of those.

  • LUNARC documentation main page
  • LUNARC documentation GitHub repository

Pages to be merged with their documentation:

  • Log in to COSMOS
","tags":["LUNARC","Link\u00f6ping","HPC","center","centre"]},{"location":"naiss/request_tracker/","title":"Request Tracker","text":"

Info

This page is here temporarily, until its content is moved to a better place.

Request Tracker, commonly abbreviated to 'RT', is the software used by the NAISS ticket system.

","tags":["RT","Request Tracker"]},{"location":"naiss/request_tracker/#workflow","title":"Workflow","text":"","tags":["RT","Request Tracker"]},{"location":"naiss/request_tracker/#as-presented","title":"As presented","text":"

As presented by Henric Zazzi on 2024-10-03 at the NAISS All-Hands:

flowchart TD\n  new_ticket[New ticket]\n  owned_ticket[Owned ticket]\n  stalled_ticket[Stalled ticket]\n  resolved_ticket[Resolved ticket]\n\n  new_ticket --> |time and knowledge| owned_ticket\n  owned_ticket --> |when solution has been sent| resolved_ticket\n  owned_ticket --> |When ticket cannot be solved yet| stalled_ticket\n  stalled_ticket --> |When ticket can be solved| owned_ticket
","tags":["RT","Request Tracker"]},{"location":"naiss/request_tracker/#alternative","title":"Alternative","text":"

As discussed at the whiteboard discussion:

flowchart TD\n  new_ticket[New ticket]\n  owned_ticket[Owned ticket]\n  stalled_ticket[Stalled ticket]\n  resolved_ticket[Resolved ticket]\n\n  new_ticket --> |time and knowledge| owned_ticket\n  owned_ticket --> |When ticket cannot be solved yet| stalled_ticket\n  owned_ticket --> |When the user has not confirmed the ticket is solved yet| stalled_ticket\n  stalled_ticket --> |When ticket can be solved| owned_ticket\n  stalled_ticket --> |When the user has confirmed the ticket has been solved| resolved_ticket
","tags":["RT","Request Tracker"]},{"location":"naiss/swestore/","title":"Swestore","text":"

Swestore is a NAISS HPC center hosting the storage system called 'dCache'.

This is the information from SUPR:

dCache is a resource at Swestore. The total capacity allocated in this round is 100 TiB. The round upper limit is 10 TiB.\n\nSwestore is a Research Data Storage Infrastructure, intended for active research data and operated by the National Academic Infrastructure for Supercomputing in Sweden, NAISS,\n\nThe storage resources provided by Swestore are made available for free for academic research funded by VR and Formas through open calls such that the best Swedish research is supported and new research is facilitated.\n\nThe purpose of Swestore allocations, granted by National Allocations Committee (NAC), is to provide large scale data storage for \u201clive\u201d or \u201cworking\u201d research data, also known as active research data.\nSee the documentation at: https://docs.swestore.se\n

Times are changing

The following information appears at application rounds and in decision mails from the first of January 2025:

Please note: NAISS can currently only approve storage on dCache at Swestore until 2026-01-01. Storage solutions for non-hot data, such as Swestore, is being investigated in accelerated form by NAISS very early 2025, and we hope to communicate the plan for long-term services before the large allocation rounds in spring 2025 are opened.\n
","tags":["Swestore","dCache","HPC","center","centre"]},{"location":"software/allinea-ddt/","title":"Allinea DDT","text":"

UPPMAX has many debuggers installed. This page describes Allinea DDT ('Distributed Debugging Tool').

UPPMAX has 96 licenses (one license per (MPI) process), which allow you to debug programs running in parallel on, for example, 6 nodes with 16 cores each, or any other combination. The licenses are shared between all users that are in an active debugging session.

To use the graphical user interface, use SSH with X forwarding or ThinLinc.

To use the program, load the ddt module from your command line:

module load ddt\n

To start the program run:

ddt\n

or

ddt ./myprogram\n

ddt can only do debugging if you have compiled your code with debugging flags (such as -g).
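
For example, a minimal sketch of compiling a C program with debug information using GCC (assuming a source file myprogram.c; -O0 additionally disables optimization, which makes stepping through the code easier):

# -g embeds debug symbols; -O0 keeps the machine code close to the source\ngcc -g -O0 -o myprogram myprogram.c\n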

"},{"location":"software/allinea-ddt/#debugging-multithreaded-programs","title":"Debugging Multithreaded programs","text":"

Start an interactive job with multiple cores (e.g. interactive -p core -n 20 -A snicXYZ -t 04:00:00) before starting DDT. In the run window, select the OpenMP box. You can change the number of OpenMP threads directly in the DDT window before running.

"},{"location":"software/allinea-ddt/#debugging-mpi-programs","title":"Debugging MPI programs","text":"

To be able to debug an MPI program, select the MPI option as well as the 'Submit to Queue' option, then click 'Change' to select the submission script configuration for Rackham and provide the job-specific options:

  • 'System | MPI/UPC Implementation | check Auto-Detect'
  • 'System | MPI/UPC Implementation | tick Create Root and Workers group automatically'
  • Select a template file depending on the partition you want to use:
    • core: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-core.qtf'
    • node: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-node.qtf'
    • devcore: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-node.qtf'
  • 'Job Submission | tick Quick Restart': allows you to restart your program without cancelling the allocated time and allocating it again.
  • Edit the Queue Submission Parameters to specify partition, project and requested time. Failing to provide a project number will cause failures in the submission process.

On the main configuration window the button \"Run\" will change to \"Submit\". Click on this button to submit your debugging session to the queue manager.

If you enable \"Memory debugging\", click the \"Details\" button and tick 'Preload the memory debugging library' and select \"C/Fortran threads\" in the \"Language:\" field. Read the manual for more detail on the other options in this panel.

"},{"location":"software/allinea-ddt/#links","title":"Links","text":"
  • DDT home page (formerly: Allinea, now Linaroforge)
"},{"location":"software/bash/","title":"bash","text":"

Bash is the default Unix shell, a command-line interpreter and script host that provides a traditional user interface for the Linux operating system at UPPMAX. Users direct the operation of the computer by entering commands as text for the command-line interpreter to execute, or by creating text scripts of one or more such commands.

"},{"location":"software/bash/#special-bash-files","title":"Special bash files","text":"
  • .bash_profile: is run whenever you log in or when you start a login shell (as when starting a job in the queue).
  • .bashrc: is run when an interactive shell that is not a login shell is started, or if it is called from the .bash_profile (as it is in the default configuration; see the sketch after this list).
  • .bash_logout: is run when you log out.
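
As an illustration of that default configuration, here is a minimal sketch of a .bash_profile that sources .bashrc, so login and non-login shells share the same settings (a sketch for illustration, not necessarily the exact UPPMAX default):

# Sketch of a ~/.bash_profile: also run ~/.bashrc in login shells\nif [ -f ~/.bashrc ]; then\n    . ~/.bashrc\nfi\n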
"},{"location":"software/beast/","title":"BEAST","text":"

BEAST is a tool for Bayesian phylogenetic analysis.

Is BEAST2 a new version of BEAST?

No.

Although BEAST and BEAST2 achieve a similar goal, BEAST and BEAST2 are developed independently.

Hence,

  • there are things BEAST can do that BEAST2 cannot, and vice versa
  • one cannot create a BEAST XML file and expect BEAST2 to be able to run it, and vice versa
"},{"location":"software/beast/#run-tracer","title":"Run Tracer","text":"

Tracer is a tool to analyse the results of a BEAST (or BEAST2) run.

See the Tracer page for how to use Tracer.

"},{"location":"software/beast2/","title":"BEAST2","text":"

BEAST2 is a tool for Bayesian phylogenetic analysis.

Is BEAST2 a new version of BEAST?

No.

Although BEAST and BEAST2 achieve a similar goal, BEAST and BEAST2 are developed independently.

Hence:

  • there are things BEAST can do that BEAST2 cannot, and vice versa
  • one cannot create a BEAST XML file and expect BEAST2 to be able to run it, and vice versa
"},{"location":"software/beast2/#using-beast2","title":"Using BEAST2","text":"

Here is how to use BEAST2 on the UPPMAX clusters.

Prefer a video?

This YouTube video shows how to use BEAST2 on the UPPMAX clusters.

"},{"location":"software/beast2/#1-load-a-beast2-module","title":"1. Load a beast2 module","text":"

First step is to load a BEAST2 module.

Here is how to find the BEAST2 versions on the UPPMAX clusters:

module spider beast2\n

When loading a BEAST2 module, also load bioinfo-tools:

module load bioinfo-tools beast2/2.7.4\n
What does that look like?
$ module load bioinfo-tools beast2/2.7.4\nbeast2/2.7.4: Also loaded beagle/4.0.0\nbeast2/2.7.4: Many Beast packages are available, to see the list, 'packagemanager -list'\nbeast2/2.7.4: Use BEAST_XMX to specify the amount of RAM (default 5g), 'export BEAST_XMX=15g'. Do not exceed RAM available to your job.\n
"},{"location":"software/beast2/#2-run-beauti","title":"2. Run BEAUti","text":"

Next step is to create a BEAST2 configuration file using BEAUti. This graphical tool can be started using:

beauti\n

As BEAUti is a graphical program, it needs SSH with X forwarding enabled.

What does that look like?

Starting BEAUti results in the following pop-up window:

After using BEAUti, save the file with your BEAST2 model.

"},{"location":"software/beast2/#3-run","title":"3. Run","text":"

A BEAST2 run takes a lot of computing power, hence do not run it on a login node. Instead, run it on an interactive node or use a script.

How to start an interactive node?

View the UPPMAX documentation 'How to start an interactive node on Rackham'.

On an interactive node, run BEAST2 on the saved BEAST2 model:

beast beast2_setup.xml\n

When using a script, put that line in a script. Below is an example script, called run_beast2.sh:

run_beast2.sh
#!/bin/bash\n#SBATCH -A uppmax2023-2-25\nmodule load bioinfo-tools beast2/2.7.4\nbeast beast2_setup.xml\n
  • In line 2, replace uppmax2023-2-25 with your UPPMAX project.
  • In line 3, you may want to replace beast2/2.7.4 with your favorite BEAST2 version.

Then run this script using sbatch run_beast2.sh.

Note that this is a minimal script. See the UPPMAX documentation on Slurm for ways to improve this script.

"},{"location":"software/beast2/#view-the-trees-using-densitree","title":"View the trees using DensiTree","text":"

DensiTree is a tool that allows one to display the posterior tree distribution of a BEAST2 run.

Run:

densitree [trees_filename]\n

where [trees_filename] is the name of the file containing the posterior trees, resulting in, for example, densitree my_file.trees.

"},{"location":"software/beast2/#run-tracer","title":"Run Tracer","text":"

Tracer is a tool to analyse the results of a (BEAST or) BEAST2 run.

See the Tracer page for how to use Tracer.

"},{"location":"software/beast2/#show-info","title":"Show info","text":"
beast -beagle_info\n
What does that look like?

Here the command is run on a Rackham compute node, using an interactive session.

Here an interactive session with 1 node:

interactive -A uppmax2023-2-25 -M snowy -N 1 -n 16 --exclusive -t 1-00:00:00\n
[sven@s93 ~]$ beast -beagle_info\n\n                        BEAST v2.7.4, 2002-2023\n             Bayesian Evolutionary Analysis Sampling Trees\n                       Designed and developed by\n Remco Bouckaert, Alexei J. Drummond, Andrew Rambaut & Marc A. Suchard\n\n                   Centre for Computational Evolution\n                         University of Auckland\n                       r.bouckaert@auckland.ac.nz\n                        alexei@cs.auckland.ac.nz\n\n                   Institute of Evolutionary Biology\n                        University of Edinburgh\n                           a.rambaut@ed.ac.uk\n\n                    David Geffen School of Medicine\n                 University of California, Los Angeles\n                           msuchard@ucla.edu\n\n                      Downloads, Help & Resources:\n                           http://beast2.org/\n\n  Source code distributed under the GNU Lesser General Public License:\n                   http://github.com/CompEvol/beast2\n\n                           BEAST developers:\n   Alex Alekseyenko, Trevor Bedford, Erik Bloomquist, Joseph Heled,\n Sebastian Hoehna, Denise Kuehnert, Philippe Lemey, Wai Lok Sibon Li,\nGerton Lunter, Sidney Markowitz, Vladimir Minin, Michael Defoin Platel,\n          Oliver Pybus, Tim Vaughan, Chieh-Hsi Wu, Walter Xie\n\n                               Thanks to:\n          Roald Forsberg, Beth Shapiro and Korbinian Strimmer\n\n\n--- BEAGLE RESOURCES ---\n\n0 : CPU (x86_64)\n    Flags: PRECISION_SINGLE PRECISION_DOUBLE COMPUTATION_SYNCH EIGEN_REAL EIGEN_COMPLEX SCALING_MANUAL SCALING_AUTO SCALING_ALWAYS SCALERS_RAW SCALERS_LOG VECTOR_SSE VECTOR_NONE THREADING_CPP THREADING_NONE PROCESSOR_CPU FRAMEWORK_CPU\n

Here an interactive session with 2 nodes:

interactive -A uppmax2023-2-25 -M snowy -N 2 -n 32 --exclusive -t 1-00:00:00\n
[sven@s106 ~]$ beast -beagle_info\n\n                        BEAST v2.7.4, 2002-2023\n             Bayesian Evolutionary Analysis Sampling Trees\n                       Designed and developed by\n Remco Bouckaert, Alexei J. Drummond, Andrew Rambaut & Marc A. Suchard\n\n                   Centre for Computational Evolution\n                         University of Auckland\n                       r.bouckaert@auckland.ac.nz\n                        alexei@cs.auckland.ac.nz\n\n                   Institute of Evolutionary Biology\n                        University of Edinburgh\n                           a.rambaut@ed.ac.uk\n\n                    David Geffen School of Medicine\n                 University of California, Los Angeles\n                           msuchard@ucla.edu\n\n                      Downloads, Help & Resources:\n                           http://beast2.org/\n\n  Source code distributed under the GNU Lesser General Public License:\n                   http://github.com/CompEvol/beast2\n\n                           BEAST developers:\n   Alex Alekseyenko, Trevor Bedford, Erik Bloomquist, Joseph Heled,\n Sebastian Hoehna, Denise Kuehnert, Philippe Lemey, Wai Lok Sibon Li,\nGerton Lunter, Sidney Markowitz, Vladimir Minin, Michael Defoin Platel,\n          Oliver Pybus, Tim Vaughan, Chieh-Hsi Wu, Walter Xie\n\n                               Thanks to:\n          Roald Forsberg, Beth Shapiro and Korbinian Strimmer\n\n\n--- BEAGLE RESOURCES ---\n\n0 : CPU (x86_64)\n    Flags: PRECISION_SINGLE PRECISION_DOUBLE COMPUTATION_SYNCH EIGEN_REAL EIGEN_COMPLEX SCALING_MANUAL SCALING_AUTO SCALING_ALWAYS SCALERS_RAW SCALERS_LOG VECTOR_SSE VECTOR_NONE THREADING_CPP THREADING_NONE PROCESSOR_CPU FRAMEWORK_CPU\n
"},{"location":"software/beast2/#troubleshooting","title":"Troubleshooting","text":""},{"location":"software/beast2/#beauti-gives-badalloc","title":"BEAUti gives BadAlloc","text":"
  • Platform(s): MacOS

This problem seems to be related to not having a proper X server installed. In this case, SSH X forwarding works to the extent that SSH is able to show xeyes, yet fails to show BEAUti. Also, using the remote desktop via a ThinLinc client fails.

A solution may be to use the remote desktop via the web.

What does that look like?

Here is how it looks:

[kayakhi@rackham2 ~]$ xeyes\n\n[kayakhi@rackham2 ~]$ module load bioinfo-tools beast2/2.7.4\n\nbeast2/2.7.4: Also loaded beagle/4.0.0\n\nbeast2/2.7.4: Many Beast packages are available, to see the list, 'packagemanager -list'\n\nbeast2/2.7.4: Use BEAST_XMX to specify the amount of RAM (default 5g), 'export BEAST_XMX=15g'. Do not exceed RAM available to your job.\n\n[kayakhi@rackham2 ~]$ beauti\n\nX Error of failed request:  BadAlloc (insufficient resources for operation)\n\n  Major opcode of failed request:  149 (GLX)\n\n  Minor opcode of failed request:  5 (X_GLXMakeCurrent)\n\n  Serial number of failed request:  0\n\n  Current serial number in output stream:  32\n

Note that this user has enabled SSH X forwarding, as demonstrated by xeyes running without problems.

"},{"location":"software/beast2/#optimize-performance","title":"Optimize performance","text":"
  • BEAST2 performance suggestions
  • BEAST2 and BEAGLE
"},{"location":"software/beast2/#links","title":"Links","text":"
  • DensiTree GitHub repository
  • CIPRES: cyberinfrastructure for phylogenetics research
"},{"location":"software/bianca_file_transfer_using_filezilla/","title":"File transfer to/from Bianca using FileZilla","text":"

FileZilla connected to Bianca

There are multiple ways to transfer data to/from Bianca.

Here, we show how to transfer files using a graphical tool called FileZilla.

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#procedure","title":"Procedure","text":"Would you like a video?

If you would like to see how to transfer files to/from Bianca using FileZilla, watch the video here

To transfer files to/from Bianca using FileZilla, do the following steps:

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

Get inside SUNET.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#2-start-filezilla","title":"2. Start FileZilla","text":"

Start FileZilla.

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#3-select-file-site-manager","title":"3. Select 'File | Site manager'","text":"

In FileZilla, from the menu, select 'File | Site manager'

Where is that?

It is here:

The FileZilla 'File' menu contains the item 'Site manager'

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#4-click-new-site","title":"4. Click 'New site'","text":"

In the 'Site Manager' dialog, click 'New site'

Where is that?

It is here:

'New site' can be found at the bottom-left

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#5-create-a-name-for-the-site-eg-bianca-sens123456","title":"5. Create a name for the site, e.g. bianca-sens123456","text":"

In the 'New Site' dialog, create a name for the site, e.g. bianca-sens123456.

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#6-configure-site","title":"6. Configure site","text":"

In the 'New Site' dialog, use all standards, except:

  • Set protocol to 'SFTP - SSH File Transfer Protocol'
  • Set host to bianca-sftp.uppmax.uu.se
  • Set user to [username]-[project], e.g. sven-sens123456
What does that look like?

It looks similar to these:

Storing a password is useless

Because Bianca holds sensitive data, you need to use the UPPMAX two-factor authentication code every time you log in. Storing a password is hence useless.

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#7-click-connect","title":"7. Click 'Connect'","text":"

In FileZilla, click 'Connect'

You will be asked for your password with two-factor identification, hence type [your password][2FA code], e.g. VerySecret123456.

Now you can transfer files between your local computer and your wharf folder.

NOTE: FileZilla will ask for your password and two-factor code for each file you transfer. To avoid that, go to Site Manager > Transfer Settings and set 'Limit number of simultaneous connections' to 1.

What does that look like?

It looks like this:

FileZilla is ready to transfer files

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#troubleshooting","title":"Troubleshooting","text":"","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_filezilla/#access-denied","title":"Access denied","text":"

Full error, in the FileZilla terminal:

Status: Connecting to bianca-sftp.uppmax.uu.se...\n\nStatus: Using username \"sven-sens2023613\".\n\nStatus: Access denied\n\nError: Authentication failed.\n\nError: Critical error: Could not connect to server\n

Hypotheses:

  • The user is not within SUNET
How do I know if I am within the university networks?

If you login via eduroam you are within the university networks.

When unsure, go to the Bianca remote desktop website at https://bianca.uppmax.uu.se: if this page does not load, you are outside of the university networks.

See How to get inside of the university networks if you are outside of the university networks.

  • The account is not active
How do I know if the Bianca project is active?

A quick way to confirm your Bianca project is active: go to https://bianca.uppmax.uu.se and type your username. If the project is displayed, it is active.

To confirm your project is active or inactive, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see if your project is active.

  • The user is not a member of the Bianca project
How do I know if I am a member of the Bianca project?

A quick way to confirm you are a member of the Bianca project: go to https://bianca.uppmax.uu.se and type your username. If the project is displayed, you are a member of the Bianca project.

To confirm you are (or are not) a member, use the SUPR NAISS website. See the UPPMAX documentation on projects for how to see which projects you are a member of.

See the UPPMAX page on contacting support on how to contact us.

","tags":["FileZilla","Bianca"]},{"location":"software/bianca_file_transfer_using_gui/","title":"File transfer to/from Bianca using a graphical tool","text":"

FileZilla connected to Bianca

"},{"location":"software/bianca_file_transfer_using_gui/#overview","title":"Overview","text":"

As users, we need to transfer files between our local computers and Bianca. The many ways to transfer files to/from Bianca are discussed here. On this page, we learn how to transfer files to Bianca using a graphical tool/program.

There are constraints on which programs we can use, due to Bianca being an HPC cluster for sensitive data. Details are described in 'Bianca's constraints' below; these are graphical tools that work:

Tool | Description
FileZilla | All operating systems
WinSCP | Windows-only

When using such a graphical tool, one needs to be inside of SUNET.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

When a tool is set up, one can only transfer files between your local computer and your Bianca wharf folder.

"},{"location":"software/bianca_file_transfer_using_gui/#biancas-constraints","title":"Bianca's constraints","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#faf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#aaf,color:#000,stroke:#00f\n\n    subgraph sub_inside[IP inside SUNET]\n      subgraph sub_bianca_shared_env[Bianca shared network]\n        subgraph sub_bianca_private_env[The project's private virtual project cluster]\n          login_node(login/calculation/interactive node):::calculation_node\n          files_in_wharf(Files in wharf):::file_node\n          files_in_bianca_project(Files in Bianca project folder):::file_node\n        end\n      end\n      user(User)\n      user_local_files(Files on user computer):::file_node\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#fcc,color:#000,stroke:#fcc\n    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc\n    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc\n\n    user --> |logs in |login_node\n    user --> |uses| user_local_files\n\n    %% As of 2023-12-22, using `**text**` for bold face, does not render correctly\n    %% user_local_files <== \"`**transfer files**`\" ==> files_in_wharf\n    user_local_files <== \"transfer files\" ==> files_in_wharf\n\n    login_node --> |can use|files_in_bianca_project\n    login_node --> |can use|files_in_wharf\n    files_in_wharf <--> |transfer files| files_in_bianca_project

Overview of file transfer on Bianca, when using a graphical tool. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. In this session, we will transfer files between 'Files on user computer' and 'Files in wharf' using a graphical tool, e.g. FileZilla

Bianca is an HPC cluster for sensitive data. To protect that sensitive data, Bianca has no direct internet connection. This means that files cannot be downloaded directly.

What is an HPC cluster again?

What an HPC cluster is, is described in general terms here.

Instead, one needs to learn one of the many ways to do secure file transfer.

Here, we show how to transfer files using a graphical tool called FileZilla.

In general, one can pick any graphical tools with these constraints:

  • the tool must support SFTP
  • the tool must not store a password

Whatever tool one picks, it must do secure file transfer. For secure file transfer, Bianca supports the SFTP protocol. So, for secure file transfer to Bianca, one needs a tool that supports SFTP.

Use SFTP ... and why users think incorrectly that SCP will work

Only SFTP will work. SCP will never work.

However, some users use tools that support SFTP, yet that have 'SCP' in the name, for example, 'WinSCP'. As users hear from colleagues that the tool 'WinSCP' works, they may incorrectly conclude that SCP will work.

SCP will never work. Only SFTP will work.

Whatever tool one picks, additionally, the tool must not store a password. Due to security reasons, one needs to connect to Bianca using a password and a two-factor authentication number (e.g. VerySecret123456). If a tool stores a password, that password will be valid for only one session.

One tool that can be used for file transfer to Bianca is FileZilla, which is described in detail below. The extra materials at the bottom of this page contain other tools.

"},{"location":"software/bianca_file_transfer_using_gui/#file-transfer-overview","title":"File transfer overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n    classDef transit_node fill:#fff,color:#000,stroke:#fff\n\n    subgraph sub_inside[IP inside SUNET]\n      subgraph sub_bianca_shared_env[Bianca shared network]\n        subgraph sub_bianca_private_env[The project's private virtual project cluster]\n          login_node(login/calculation/interactive node):::calculation_node\n          files_in_wharf(Files in wharf):::file_node\n          files_in_bianca_project(Files in Bianca project folder):::file_node\n        end\n      end\n      user(User)\n      user_local_files(Files on user computer):::file_node\n      files_on_transit(Files posted to Transit):::transit_node\n      files_on_other_clusters(Files on other HPC clusters):::file_node\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#fcc,color:#000,stroke:#fcc\n    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc\n    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc\n\n    user --> |logs in |login_node\n    user --> |uses| user_local_files\n    user_local_files <--> |transfer files|files_in_wharf\n    user_local_files <--> |transfer files|files_on_transit\n    files_on_transit <--> |transfer files|files_in_wharf\n    files_on_transit <--> |transfer files|files_on_other_clusters\n    login_node --> |can use|files_in_bianca_project\n    login_node --> |can use|files_in_wharf\n    files_in_wharf <--> |transfer files| files_in_bianca_project

Overview of file transfer on Bianca The purple nodes are about file transfer, the blue nodes are about 'doing other things'.

"},{"location":"software/bianca_file_transfer_using_lftp/","title":"Using lftp with Bianca","text":"

lftp is a command-line program to transfer files to/from Bianca.

With the command-line SFTP client lftp, you need to \"set net:connection_limit 1\". lftp may also defer the actual connection until it is really required, unless you end your connect URL with a path.

When inside of SUNET (which can be on a local computer or on Rackham) do:

lftp sftp://[user_name]-[project_id]@bianca-sftp.uppmax.uu.se/[user_name]-[project_id]/\n

where

  • [project_id] is the ID of your NAISS project
  • [user_name] is the name of your UPPMAX user account

For example:

lftp sftp://sven-sens2016001@bianca-sftp.uppmax.uu.se/sven-sens2016001/\n
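
To apply the connection limit mentioned above when starting lftp, one option is lftp's -e flag, which runs a command at startup (a sketch; replace the user name and project ID with your own):

lftp -e \"set net:connection_limit 1\" sftp://sven-sens2016001@bianca-sftp.uppmax.uu.se/sven-sens2016001/\n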
"},{"location":"software/bianca_file_transfer_using_rsync/","title":"File transfer to/from Bianca using rsync","text":"

rsync is a tool to do file transfer to/from Bianca, that works under Linux, Mac and Windows.

Prefer a video?

Watch this video to see the procedure below as a video.

To transfer files to/from Bianca using rsync, do the following steps:

flowchart TD\n  local_computer_ourside_sunet[Local computer outside of SUNET]\n  local_computer[Local computer]\n  transit[Transit]\n  bianca[Bianca]\n  local_computer_ourside_sunet --> |1.Get inside SUNET|local_computer\n  local_computer --> |2.login| transit\n  local_computer --> |4.rsync| bianca\n  bianca --> |5.rsync| local_computer\n  transit --> |3.mount| bianca
"},{"location":"software/bianca_file_transfer_using_rsync/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

Get inside SUNET.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

"},{"location":"software/bianca_file_transfer_using_rsync/#2-log-in-to-transit","title":"2. Log in to Transit","text":"

On your local computer, start a terminal and use ssh to login to Transit:

ssh [username]@transit.uppmax.uu.se\n

where

  • [username] is your UPPMAX username

For example:

ssh sven@transit.uppmax.uu.se\n

When asked for a password, use your UPPMAX password (without 2FA).

See Log in to transit for more details on how to log in to Transit.

"},{"location":"software/bianca_file_transfer_using_rsync/#3-mount-a-bianca-project","title":"3. Mount a Bianca project","text":"

On transit, mount the wharf of your Bianca project:

mount_wharf [project_id]\n

where

  • [project_id] is the ID of your NAISS project
What about the [path] argument?

Well spotted!

Indeed, the Transit server gives these arguments:

mount_wharf [project_id] [path]\n

However, the [path] argument is optional: if not given, a default will be used.

To simplify matters, here we use the default.

for example:

mount_wharf sens2016001\n

The password is your normal UPPMAX password directly followed by the six digits from the UPPMAX 2-factor authentication. For example, if your password is VerySecret and the second factor code is 123456 you would type VerySecret123456 as the password in this step.

Now a folder called sens2016001 is created.

"},{"location":"software/bianca_file_transfer_using_rsync/#4-transfer-files-to-bianca","title":"4. Transfer files to Bianca","text":"

You can transfer files to Bianca by:

  • 4a. Transfer individual files to Bianca
  • 4b. Transfer all files in a folder to Bianca
"},{"location":"software/bianca_file_transfer_using_rsync/#4a-transfer-individual-files-to-bianca","title":"4a. Transfer individual files to Bianca","text":"

On local computer, do:

rsync [my_local_file] [username]@transit.uppmax.uu.se:[project_id]\n

where

  • [my_local_file] is the path to your local file
  • [project_id] is the ID of your NAISS project
  • [username] is your UPPMAX username

for example:

rsync my_local_file.txt sven@transit.uppmax.uu.se:sens2016001\n

No need to specify the path to the mounted folder, if defaults are used.

The files can now be found in your wharf folder.

"},{"location":"software/bianca_file_transfer_using_rsync/#4b-transfer-all-files-in-a-folder-to-bianca","title":"4b. Transfer all files in a folder to Bianca","text":"

On local computer, do:

rsync --recursive my_folder [username]@transit.uppmax.uu.se:[project_id]\n

where

  • [project_id] is the ID of your NAISS project
  • [username] is your UPPMAX username

for example:

rsync --recursive my_folder sven@transit.uppmax.uu.se:sens2016001\n

No need to specify the path to the mounted folder, if defaults are used.

The files can now be found in your wharf folder.

"},{"location":"software/bianca_file_transfer_using_rsync/#5-transfer-files-from-bianca-to-you-local-computer","title":"5. Transfer files from Bianca to you local computer","text":"

Be responsible with sensitive data

This command below will copy data from Bianca to your local computer.

You can transfer files from Bianca to your local computer by:

  • 5a. Transfer individual files from Bianca to your local computer
  • 5b. Transfer all folders from Bianca to your local computer
"},{"location":"software/bianca_file_transfer_using_rsync/#5a-transfer-individual-files-from-bianca-to-your-local-computer","title":"5a. Transfer individual files from Bianca to your local computer","text":"

On your local computer, do:

rsync [username]@transit.uppmax.uu.se:[project_id]/[file_in_wharf] .\n

where

  • [project_id] is the ID of your NAISS project
  • [username] is your UPPMAX username
  • [file_in_wharf] is the name of the file in wharf
  • . means 'in the current folder of my local computer' or 'here'

for example:

rsync sven@transit.uppmax.uu.se:sens2016001/my_file_in_wharf.txt .\n

This copies the individual file in your wharf to your local computer.

"},{"location":"software/bianca_file_transfer_using_rsync/#5b-transfer-all-folders-from-bianca-to-you-local-computer","title":"5b. Transfer all folders from Bianca to you local computer","text":"

This will copy all folders in your wharf

This command below will copy all folders in your wharf folder to your local computer.

This assumes that there is little data in your wharf folder.

We assume you follow good wharf hygiene, i.e. your wharf folder is mostly empty most of the time.

On your local computer, do:

rsync --recursive [username]@transit.uppmax.uu.se:[project_id] .\n

where

  • [project_id] is the ID of your NAISS project
  • [username] is your UPPMAX username
  • . means 'in the current folder of my local computer' or 'here'

for example:

rsync --recursive sven@transit.uppmax.uu.se:sens2016001 .\n

This copies your wharf folder to your local computer. The folder created on your local computer will be called [project_id], for example, sens2016001.

"},{"location":"software/bianca_file_transfer_using_sftp/","title":"Using sftp with Bianca","text":"

sftp is a command-line program to transfer files to/from Bianca.

"},{"location":"software/bianca_file_transfer_using_sftp/#usage","title":"Usage","text":"Would you enjoy a video?

A video showing how to use sftp with Bianca can be found here.

When inside of SUNET (which can be on a local computer or on Rackham) do:

sftp [user_name]-[project_id]@bianca-sftp.uppmax.uu.se:/[user_name]-[project_id]\n

where

  • [project_id] is the ID of your NAISS project
  • [user_name] is the name of your UPPMAX user account

For example:

sftp sven-sens2016001@bianca-sftp.uppmax.uu.se:/sven-sens2016001\n

sftp will ask for a password:

sven-sens2016001@bianca-sftp.uppmax.uu.se's password:\n

The password is your normal UPPMAX password directly followed by the six digits from the UPPMAX 2-factor authentication. For example, if your password is VerySecret and the second factor code is 123456 you would type VerySecret123456 as the password in this step.

After typing in the password and 2FA one sees a welcome message and the sftp prompt.

What does that look like?

This is the welcome message:

Hi!\n\nYou are connected to the bianca wharf (sftp service) at\nbianca-sftp.uppmax.uu.se.\n\nNote that we only support SFTP, which is not exactly the\nsame as SSH (rsync and scp will not work).\n\nPlease see our homepage and the Bianca User Guide\nfor more information:\n\nhttps://www.uppmax.uu.se/support/user-guides/bianca-user-guide/\n\nIf you have any questions not covered by the User Guide, you are\nwelcome to contact us at support@uppmax.uu.se.\n\nBest regards,\nUPPMAX\n\nsven-sens2016001@bianca-sftp.uppmax.uu.se's password:\nConnected to bianca-sftp.uppmax.uu.se.\nsftp>\n
How do I get rid of the welcome message?

Use sftp's -q (which is short for 'quiet') flag:

sftp -q sven-sens2016001@bianca-sftp.uppmax.uu.se\n

The last line, sftp> is the sftp prompt.

Once connected, you will have to type sftp commands to upload/download files. See the UPPMAX page on sftp for how to do so; a minimal example follows below.
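
For example, a minimal sketch using the standard sftp commands put and get (assuming my_file.txt exists on your local computer and my_results.txt exists in your wharf folder):

sftp> put my_file.txt\nsftp> get my_results.txt\n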

With sftp you only have access to your wharf folder.

"},{"location":"software/bianca_file_transfer_using_winscp/","title":"File transfer to/from Bianca using WinSCP","text":"

Download and install WinSCP

WinSCP is a secure file transfer tool that works under Windows.

To transfer files to/from Bianca using WinSCP, do the following steps:

","tags":["transfer","data transfer","file transfer","Bianca","WinSCP"]},{"location":"software/bianca_file_transfer_using_winscp/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

Get inside SUNET.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","data transfer","file transfer","Bianca","WinSCP"]},{"location":"software/bianca_file_transfer_using_winscp/#2-start-winscp","title":"2. Start WinSCP","text":"

Start WinSCP.

","tags":["transfer","data transfer","file transfer","Bianca","WinSCP"]},{"location":"software/bianca_file_transfer_using_winscp/#3-create-a-new-site","title":"3. Create a new site","text":"

In WinSCP, click on 'Create new site'.

For that site, use all standards, except:

  • Set file protocol to 'SFTP'
  • Set host name to bianca-sftp.uppmax.uu.se
  • Set user name to [username]-[project], e.g. sven-sens123456
  • Do not set a password! Provide your UPPMAX password followed immediately by the UPPMAX 2FA code when asked by the interactive login.
What does that look like?

It looks like this:

","tags":["transfer","data transfer","file transfer","Bianca","WinSCP"]},{"location":"software/bianca_file_transfer_using_winscp/#4-transfer-files","title":"4. Transfer files","text":"

Now you can transfer files between your local computer and your wharf folder.

","tags":["transfer","data transfer","file transfer","Bianca","WinSCP"]},{"location":"software/cellranger/","title":"Cell Ranger","text":"

According to the Cell Ranger GitHub repository:

Cell Ranger is a set of analysis pipelines that perform sample demultiplexing, barcode processing, single cell 3' and 5' gene counting, V(D)J transcript sequence assembly and annotation, and Feature Barcode analysis from 10x Genomics Chromium Single Cell data.

Cell Ranger (the tool) is part of the cellranger module.

Finding the module that has Cell Ranger installed:

module spider cellranger\n
What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ module spider cellranger\n\n----------------------------------------------------------------------------\n  cellranger:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger/1.1.0\n        cellranger/1.3.0\n        cellranger/2.0.2\n        cellranger/2.2.0\n        cellranger/3.0.1\n        cellranger/4.0.0\n        cellranger/5.0.1\n        cellranger/6.0.2\n        cellranger/6.1.2\n        cellranger/7.0.0\n        cellranger/7.0.1\n        cellranger/7.1.0\n        cellranger/8.0.1\n     Other possible modules matches:\n        cellranger-ARC  cellranger-ARC-data  cellranger-ATAC  cellranger-ATAC-da\nta  ...\n\n----------------------------------------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*cellranger.*'\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger\" package (including how \nto load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger/8.0.1\n----------------------------------------------------------------------------\n
How do I see tools similar to cellranger?

In case you want to search for similar tools, add a dash at the end of the search term:

module spider cellranger-\n

Your output will look similar to this:

[sven@rackham1 ~]$ module spider cellranger-\n\n----------------------------------------------------------------------------\n  cellranger-ARC:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-ARC/1.0.0\n        cellranger-ARC/2.0.2\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-ARC\" package (including \nhow to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-ARC/2.0.2\n----------------------------------------------------------------------------\n\n----------------------------------------------------------------------------\n  cellranger-ARC-data:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-ARC-data/2020-A\n        cellranger-ARC-data/2020-A-2.0.0\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-ARC-data\" package (inclu\nding how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-ARC-data/2020-A-2.0.0\n----------------------------------------------------------------------------\n\n----------------------------------------------------------------------------\n  cellranger-ATAC:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-ATAC/1.2.0\n        cellranger-ATAC/2.0.0\n        cellranger-ATAC/2.1.0\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-ATAC\" package (including\n how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-ATAC/2.1.0\n----------------------------------------------------------------------------\n\n----------------------------------------------------------------------------\n  cellranger-ATAC-data:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-ATAC-data/1.2.0\n        cellranger-ATAC-data/2.0.0\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-ATAC-data\" package (incl\nuding how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-ATAC-data/2.0.0\n----------------------------------------------------------------------------\n\n----------------------------------------------------------------------------\n  cellranger-DNA: cellranger-DNA/1.1.0\n----------------------------------------------------------------------------\n\n    You will need to load all module(s) on any one of the lines below before the\n \"cellranger-DNA/1.1.0\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n       cellranger-DNA - use cellranger-DNA 1.1.0\n\n\n      The cellranger-DNA-data/1.0.0 module is loaded as a prerequisite.\n\n\n\n\n----------------------------------------------------------------------------\n  cellranger-DNA-data: 
cellranger-DNA-data/1.0.0\n----------------------------------------------------------------------------\n\n    This module can be loaded directly: module load cellranger-DNA-data/1.0.0\n\n    Help:\n       cellranger-DNA-data - use cellranger-DNA-data 1.0.0\n\n\n      10X Genomics Chromium Cell Ranger DNA data\n      Version 1.0.0\n      https://support.10xgenomics.com/single-cell-dna/software/downloads/latest\n\n      NOTE: This is a data module. The software that uses this data is the cellr\nanger-DNA module, which loads this.\n\n\n      Default data for GRCh38, GRCh38 and GRCm38 references can be found in $CEL\nLRANGER_DNA_DATA.\n      To see the top-level directories:\n\n       ls -l $CELLRANGER_DNA_DATA\n\n      Genome assembly    Subdirectory\n      ---------------    ------------\n      GRCh38             refdata-GRCh38-1.0.0\n      GRCh37             refdata-GRCh37-1.0.0\n      GRCm38             refdata-GRCm38-1.0.0\n\n      Sample Index Set Sequences (both CSV and JSON formats)\n      ------------------------------------------------------\n      Chromium DNA     chromium-shared-sample-indexes-plate.csv\n                       chromium-shared-sample-indexes-plate.json\n\n      For information on how each dataset was produced, see the References secti\non of\n      https://support.10xgenomics.com/single-cell-dna/software/downloads/latest\n\n\n\n\n----------------------------------------------------------------------------\n  cellranger-VDJ-data:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-VDJ-data/4.0.0\n        cellranger-VDJ-data/5.0.0\n        cellranger-VDJ-data/7.0.0\n        cellranger-VDJ-data/7.1.0\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-VDJ-data\" package (inclu\nding how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-VDJ-data/7.1.0\n----------------------------------------------------------------------------\n\n----------------------------------------------------------------------------\n  cellranger-data:\n----------------------------------------------------------------------------\n     Versions:\n        cellranger-data/1.1.0\n        cellranger-data/1.2.0\n        cellranger-data/3.0.0\n        cellranger-data/2020-A\n        cellranger-data/2024-A\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"cellranger-data\" package (including\n how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider cellranger-data/2024-A\n----------------------------------------------------------------------------\n

Loading the latest version of the cellranger module:

module load bioinfo-tools cellranger/8.0.1\n
What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ module load bioinfo-tools cellranger/8.0.1\nDefault data for several references are available at $CELLRANGER_DATA; see 'module help cellranger-data/2024-A' for more information\nDefault data for GRCh38 and GRCm38 immune profiling references are available at $CELLRANGER_VDJ_DATA; see 'module help cellranger-VDJ-data/7.1.0' for more information\n

Now you can run Cell Ranger:

cellranger\n
What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ cellranger\ncellranger cellranger-8.0.1\n\nProcess 10x Genomics Gene Expression, Feature Barcode, and Immune Profiling data\n\nUsage: cellranger <COMMAND>\n\nCommands:\n  count           Count gene expression and/or feature barcode reads from a\n                      single sample and GEM well\n  multi           Analyze multiplexed data or combined gene\n                      expression/immune profiling/feature barcode data\n  multi-template  Output a multi config CSV template\n  vdj             Assembles single-cell VDJ receptor sequences from 10x\n                      Immune Profiling libraries\n  aggr            Aggregate data from multiple Cell Ranger runs\n  reanalyze       Re-run secondary analysis (dimensionality reduction,\n                      clustering, etc)\n  mkvdjref        Prepare a reference for use with CellRanger VDJ\n  mkfastq         Run Illumina demultiplexer on sample sheets that contain\n                      10x-specific sample index sets\n  testrun         Execute the 'count' pipeline on a small test dataset\n  mat2csv         Convert a feature-barcode matrix to CSV format\n  mkref           Prepare a reference for use with 10x analysis software.\n                      Requires a GTF and FASTA\n  mkgtf           Filter a GTF file by attribute prior to creating a 10x\n                      reference\n  upload          Upload analysis logs to 10x Genomics support\n  sitecheck       Collect Linux system configuration information\n  help            Print this message or the help of the given subcommand(s)\n\nOptions:\n  -h, --help     Print help\n  -V, --version  Print version\n
","tags":["cellranger","cell ranger","Cellranger","Cell Ranger","10XGenomics","10x Genomics"]},{"location":"software/cellranger/#using-cell-ranger-from-python","title":"Using Cell Ranger from Python","text":"For staff

Related to ticket 297240

","tags":["cellranger","cell ranger","Cellranger","Cell Ranger","10XGenomics","10x Genomics"]},{"location":"software/cellranger/#links","title":"Links","text":"
  • Cell Ranger GitHub repository
","tags":["cellranger","cell ranger","Cellranger","Cell Ranger","10XGenomics","10x Genomics"]},{"location":"software/chmod/","title":"chmod","text":"

chmod is a Linux command to change the permissions of a file or folder

"},{"location":"software/chmod/#how-to-make-a-script-executable","title":"How to make a script executable?","text":"

Use (when the script is called my_script.sh):

chmod +x my_script.sh\n

You can now run the script using:

./my_script.sh\n
"},{"location":"software/chmod/#how-to-create-a-folder-in-the-shared-project-folder-that-only-i-can-access","title":"How to create a folder in the shared project folder that only I can access?","text":"

Your project folders at /proj/[naiss_project] are shared by members of that NAISS project.

If you need a folder that only you can access, assuming that folder is called my_private_folder, do the following (mode 700 gives the owner full access and removes all access for group and others):

chmod 700 my_private_folder\n
How can I confirm it worked?

Use ll:

$ ll\ndrwxrwsr-x 2 sven my_group 4096 Aug 14 09:07 a_shared_folder/\ndrwx--S--- 2 sven my_group 4096 Aug 14 09:06 my_private_folder/\n

The first characters of each line show the permissions:

  • drwxrwsr-x: accessible with group
  • drwx--S---: only accessible by you

Now, you can enter that folder:

cd my_private_folder\n

However, others cannot and get this error message:

bash: cd: my_private_folder/: Permission denied\n
"},{"location":"software/compilers/","title":"Compilers","text":"

UPPMAX supports multiple compilers:

Compiler  Language(s)      Description
GCC       C, C++, Fortran  The GNU compiler collection
icc       C                Older Intel C compiler
icpc      C++              Intel C++ compiler
icx       C                Newer Intel C compiler
ifort     Fortran          Older Intel Fortran compiler
ifx       Fortran          Newer Intel Fortran compiler
javac     Java             Java compiler

Different compilers are associated with different debuggers and different profiling tools.

How to make sure you have only the right compiler loaded?

Use

module list\n

to get a list of modules.

This may look like this:

Currently Loaded Modules:\n  1)  uppmax    2) intel/19.5\n

If there are modules connected to the incorrect compiler, unload the module, for example:

module unload intel\n

This scenario applies if, for example, you want to use tools built with the GCC compiler.
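For example, to switch from the Intel toolchain to GCC (the GCC version below is an assumption; check module avail gcc for the versions on your cluster):

module unload intel
module load gcc/12.1.0
module list   # confirm that only the intended compiler is loaded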

"},{"location":"software/compiling_parallel/","title":"MPI and OpenMP user guide","text":"

Table of contents:

  • Compiling and running parallel programs on UPPMAX clusters.
    • Introduction
  • Overview of available compilers from GCC and Intel and compatible MPI libraries
  • Running serial programs on execution nodes
  • MPI using the OpenMPI library
    • C programs
    • Fortran programs
  • OpenMP
    • C programs
    • Fortran programs
  • Pthreads

This is a short tutorial about how to use the queuing system, and how to compile and run MPI and OpenMP jobs.

For serial programs, see a short version of this page at Compiling source code.

"},{"location":"software/compiling_parallel/#compiling-and-running-parallel-programs-on-uppmax-clusters","title":"Compiling and running parallel programs on UPPMAX clusters","text":""},{"location":"software/compiling_parallel/#introduction","title":"Introduction","text":"

These notes show by brief examples how to compile and run serial and parallel programs on the clusters at UPPMAX.

All programs are of the trivial \"hello, world\" type. The point is to demonstrate how to compile and execute the programs, not how to write parallel programs!

"},{"location":"software/compiling_parallel/#running-serial-programs-on-execution-nodes","title":"Running serial programs on execution nodes","text":""},{"location":"software/compiling_parallel/#standard-compatibility","title":"Standard compatibility","text":"
Standard       GCC      Intel
c11            gcc/4.8  intel/16+
c17 (bug fix)  gcc/8    intel/17+ (full support in 19)
Fortran 2008   gcc/9    intel/15+ (full support in 18)
Fortran 2018   gcc/9    intel/19+
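As a sketch of how to read this table: C17 needs at least gcc/8, so you could load a sufficiently new GCC module and request the standard explicitly (the module version is an assumption; check module avail gcc):

module load gcc/10.3.0
gcc -std=c17 -o hello hello.c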
"},{"location":"software/compiling_parallel/#examples","title":"Examples","text":"

Jobs are submitted to execution nodes through the resource manager. We use Slurm on our clusters.

We will use the hello program we wrote in the section Compiling source code. The programming language does not matter here, since we are dealing with serial programs.

To run the serial program hello as a batch job using Slurm, enter the following shell script in the file hello.sh:

#!/bin/bash -l\n# hello.sh :  execute hello serially in Slurm\n# command: $ sbatch hello.sh\n# sbatch options use the sentinel #SBATCH\n# You must specify a project\n#SBATCH -A your_project_name\n#SBATCH -J serialtest\n# Put all output in the file hello.out\n#SBATCH -o hello.out\n# request 5 seconds of run time\n#SBATCH -t 0:0:5\n# request one core\n#SBATCH -p core -n 1\n./hello\n

The last line in the script is the command used to start the program.

Submit the job to the batch queue:

sbatch hello.sh\n

The program's output to stdout is saved in the file named by the -o flag.

$ cat hello.out\nhello, world\n
"},{"location":"software/compiling_parallel/#mpi-using-the-openmpi-library","title":"MPI using the OpenMPI library","text":"

Before compiling a program for MPI we must choose, in addition to the compiler, which MPI implementation we want to use. At UPPMAX there are two: openmpi and intelmpi. These, with their versions, are compatible with only a subset of the gcc and intel compiler versions.

Tip

Check this compatibility page for a more complete picture of compatible versions.

"},{"location":"software/compiling_parallel/#c-programs-using-openmpi","title":"C programs using OpenMPI","text":"

Enter the following MPI program in C and save it in the file hello-mpi.c:

/* hello-mpi.c :  mpi program in c printing a message from each process */\n#include <stdio.h>\n#include <mpi.h>\nint main(int argc, char *argv[])\n{\n    int npes, myrank;\n    MPI_Init(&argc, &argv);\n    MPI_Comm_size(MPI_COMM_WORLD, &npes);\n    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);\n    printf(\"From process %d out of %d, Hello World!\\n\", myrank, npes);\n    MPI_Finalize();\n    return 0;\n}\n

For this example we will use openmpi (as noted above, intelmpi is the alternative). To load the openmpi module, enter the command below, or choose other versions according to the lists above.

module load gcc/10.3.0 openmpi/3.1.6\n

To check that the openmpi module is loaded, use the command:

module list\n

The command to compile a C program for MPI is mpicc. Which compiler is used when this command is issued depends on which compiler module was loaded before openmpi.
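To see which underlying compiler and flags the wrapper will invoke, Open MPI's wrapper compilers accept a --showme flag (to the best of our knowledge; see man mpicc):

mpicc --showme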

To compile, enter the command:

mpicc -o hello-mpi hello-mpi.c\n

You should add optimization and other flags to the mpicc command, just as you would when using the underlying compiler directly. So if the gcc compiler is used and you wish to compile an MPI program written in C with good, fast optimization, you should use a command similar to the following:

mpicc -Ofast -o hello-mpi hello-mpi.c\n

To run the mpi program hello using the batch system, we make a batch script with name hello-mpi.sh

#!/bin/bash -l\n# hello-mpi.sh :  execute parallel mpi program hello-mpi on Slurm\n# use openmpi\n# command: $ sbatch hello-mpi.sh\n# Slurm options use the sentinel #SBATCH\n#SBATCH -A your_project_name\n#SBATCH -J mpitest\n#SBATCH -o hello-mpi.out\n#\n# request 5 seconds of run time\n#SBATCH -t 00:00:05\n#SBATCH -p node -n 8\nmodule load gcc/10.3.0 openmpi/3.1.6\nmpirun ./hello-mpi\n

The last line in the script is the command used to start the program. The last word on the last line is the program name, ./hello-mpi.

Submit the job to the batch queue:

sbatch hello-mpi.sh\n

The program's output to stdout is saved in the file named by the -o flag. A test run of the above program yields the following output file:

$ cat hello-mpi.out\nFrom process 4 out of 8, Hello World!\nFrom process 5 out of 8, Hello World!\nFrom process 2 out of 8, Hello World!\nFrom process 7 out of 8, Hello World!\nFrom process 6 out of 8, Hello World!\nFrom process 3 out of 8, Hello World!\nFrom process 1 out of 8, Hello World!\nFrom process 0 out of 8, Hello World!\n
"},{"location":"software/compiling_parallel/#fortran-programs-using-openmpi","title":"Fortran programs using OpenMPI","text":"

The following example program does numerical integration to find Pi (inefficiently, but it is just an example). It integrates sqrt(1 - x*x) over [0, 1], which equals Pi/4, so the final result is multiplied by 4:

program testampi\n    implicit none\n    include 'mpif.h'\n    double precision :: h,x0,x1,v0,v1\n    double precision :: a,amaster\n    integer :: i,intlen,rank,size,ierr,istart,iend\n    call MPI_Init(ierr)\n    call MPI_Comm_size(MPI_COMM_WORLD,size,ierr)\n    call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)\n    intlen=100000000\n    write (*,*) 'I am node ',rank+1,' out of ',size,' nodes.'\n\n    h=1.d0/intlen\n    istart=(intlen-1)*rank/size\n    iend=(intlen-1)*(rank+1)/size\n    write (*,*) 'start is ', istart\n    write (*,*) 'end is ', iend\n    a=0.d0\n    do i=istart,iend\n           x0=i*h\n           x1=(i+1)*h\n           v0=sqrt(1.d0-x0*x0)\n           v1=sqrt(1.d0-x1*x1)\n           a=a+0.5*(v0+v1)*h\n    enddo\n    write (*,*) 'Result from node ',rank+1,' is ',a\n    call MPI_Reduce(a,amaster,1, &\n             MPI_DOUBLE_PRECISION,MPI_SUM,0,MPI_COMM_WORLD,ierr)\n    if (rank.eq.0) then\n           write (*,*) 'Result of integration is ',amaster\n           write (*,*) 'Estimate of Pi is ',amaster*4.d0\n    endif\n    call MPI_Finalize(ierr)\n    stop\nend program testampi\n

The program can be compiled by this procedure, using mpif90:

module load intel/20.4 openmpi/3.1.6\nmpif90 -Ofast -o testampi testampi.f90\n

The program can be run by creating a submit script sub.sh:

#!/bin/bash -l\n# execute parallel mpi program in Slurm\n# command: $ sbatch sub.sh\n# Slurm options use the sentinel #SBATCH\n#SBATCH -J mpitest\n#SBATCH -A your_project_name\n#SBATCH -o pi\n#\n# request 5 seconds of run time\n#SBATCH -t 00:00:05\n#\n#SBATCH -p node -n 8\nmodule load intel/20.4 openmpi/3.1.6\n\nmpirun ./testampi\n

Submit it:

sbatch sub.sh\n

Output from the program on Rackham:

I am node             8  out of             8  nodes.\nstart is      87499999\nend is      99999999\nI am node             3  out of             8  nodes.\nstart is      24999999\nend is      37499999\nI am node             5  out of             8  nodes.\nstart is      49999999\nend is      62499999\nI am node             2  out of             8  nodes.\nstart is      12499999\nend is      24999999\nI am node             7  out of             8  nodes.\nstart is      74999999\nend is      87499999\nI am node             6  out of             8  nodes.\nstart is      62499999\nend is      74999999\nI am node             1  out of             8  nodes.\nstart is             0\nend is      12499999\nI am node             4  out of             8  nodes.\nstart is      37499999\nend is      49999999\nResult from node             8  is    4.0876483237300587E-002\nResult from node             5  is    0.1032052706959522\nResult from node             2  is    0.1226971551244773\nResult from node             3  is    0.1186446918315650\nResult from node             7  is    7.2451466712425514E-002\nResult from node             6  is    9.0559231928350928E-002\nResult from node             1  is    0.1246737119371059\nResult from node             4  is    0.1122902087263801\nResult of integration is    0.7853982201935574\nEstimate of Pi is     3.141592880774230\n
"},{"location":"software/compiling_parallel/#openmp","title":"OpenMP","text":"

OpenMP uses threads that share memory. OpenMP is supported by both the gcc and intel compilers, in the C/C++ and Fortran languages. Do not confuse it with OpenMPI, which is an open-source MPI library. OpenMP support is built into all modern compilers.

Depending on your preference, load the chosen compiler:

module load gcc/12.1.0\n

or

module load intel/20.4\n
"},{"location":"software/compiling_parallel/#c-programs-using-openmp","title":"C programs using OpenMP","text":"

Enter the following OpenMP program in C and save it in the file hello_omp.c:

/* hello.c :  openmp program in c printing a message from each thread */\n#include <stdio.h>\n#include <omp.h>\nint main()\n{\n      int nthreads, tid;\n      #pragma omp parallel private(nthreads, tid)\n      {\n            nthreads = omp_get_num_threads();\n            tid = omp_get_thread_num();\n           printf(\"From thread %d out of %d, hello, world\\n\", tid, nthreads);\n    }\n    return 0;\n}\n

To compile, enter the command (note the -fopenmp or -qopenmp flag depending on compiler):

gcc -fopenmp -o hello_omp hello_omp.c\n

or

icc -qopenmp -o hello_omp hello_omp.c\n

Here, too, you should add optimization flags, such as -fast, as appropriate.

To run the OpenMP program hello_omp using the batch system, enter the following shell script in the file hello.sh:

#!/bin/bash -l\n# hello.sh :  execute parallel openmp program hello on Slurm\n# use openmp\n# command: $ sbatch hello.sh\n# Slurm options use the sentinel #SBATCH\n#SBATCH -J omptest\n#SBATCH -A your_project_name\n#SBATCH -o hello.out\n#\n# request 5 seconds of run time\n#SBATCH -t 00:00:05\n#SBATCH -p node -n 8\nuname -n\n#Tell the openmp program to use 8 threads\nexport OMP_NUM_THREADS=8\nmodule load intel/20.4\n# or gcc...\nulimit -s  $STACKLIMIT\n./hello_omp\n

The last line in the script is the command used to start the program.

Submit the job to the batch queue:

sbatch hello.sh\n

The program's output to stdout is saved in the file named by the -o flag. A test run of the above program yields the following output file:

$ cat hello.out\nr483.uppmax.uu.se\nunlimited\nFrom thread 0 out of 8, hello, world\nFrom thread 1 out of 8, hello, world\nFrom thread 2 out of 8, hello, world\nFrom thread 3 out of 8, hello, world\nFrom thread 4 out of 8, hello, world\nFrom thread 6 out of 8, hello, world\nFrom thread 7 out of 8, hello, world\nFrom thread 5 out of 8, hello, world\n
"},{"location":"software/compiling_parallel/#fortran-programs-using-openmp","title":"Fortran programs using OpenMP","text":"

Enter the following OpenMP program in Fortran and save it in the file hello_omp.f90:

PROGRAM HELLO\nINTEGER NTHREADS, TID, OMP_GET_NUM_THREADS, OMP_GET_THREAD_NUM\n! Fork a team of threads giving them their own copies of variables\n!$OMP PARALLEL PRIVATE(NTHREADS, TID)\n! Obtain thread number\nTID = OMP_GET_THREAD_NUM()\nPRINT *, 'Hello World from thread = ', TID\n! Only master thread does this\nIF (TID .EQ. 0) THEN\n NTHREADS = OMP_GET_NUM_THREADS()\nPRINT *, 'Number of threads = ', NTHREADS\nEND IF\n! All threads join master thread and disband\n!$OMP END PARALLEL\nEND\n

With gcc compiler:

gfortran hello_omp.f90 -o hello_omp -fopenmp\n

and with Intel compiler:

ifort hello_omp.f90 -o hello_omp -qopenmp\n

Run with:

$ ./hello_omp\n\n Hello World from thread =            1\n Hello World from thread =            2\n Hello World from thread =            0\n Hello World from thread =            3\n Number of threads =            4\n
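The number of threads in such an interactive run is taken from the OMP_NUM_THREADS environment variable (here apparently set to 4). To reproduce the output above, you could set it explicitly before running:

export OMP_NUM_THREADS=4
./hello_omp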

A batch file would look similar to the C version, above.

"},{"location":"software/compiling_parallel/#pthreads","title":"Pthreads","text":"

Pthreads (POSIX threads) are more low-level than OpenMP. This means that a beginner can get the expected speed-up more easily with just a few lines of OpenMP. On the other hand, it may be possible to gain more efficiency from your code with Pthreads, though with considerably more effort. Pthreads is native to C/C++; with the additional installation of a POSIX library for Fortran, it is possible to use it there as well.

Enter the following program in C and save it in the file hello_pthread.c:

/* hello.c :  create system pthreads and print a message from each thread */\n#include <stdio.h>\n#include <pthread.h>\n// does not work for setting array length of \"tid\": const int NTHR = 8;\n// Instead use \"#define\"\n#define NTHR 8\nint nt = NTHR, tid[NTHR];\npthread_attr_t attr;\nvoid *hello(void *id)\n{\n     printf(\"From thread %d out of %d: hello, world\\n\", *((int *) id), nt);\n     pthread_exit(0);\n}\nint main()\n{\n    int i, arg1;\n    pthread_t thread[NTHR];\n    /* system threads */\n    pthread_attr_init(&attr);\n    pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);\n    /* create threads */\n    for (i = 0; i < nt; i++) {\n          tid[i] = i;\n          pthread_create(&thread[i], &attr, hello, (void *) &tid[i]);\n     }\n    /* wait for threads to complete */\n    for (i = 0; i < nt; i++)\n            pthread_join(thread[i], NULL);\n      return 0;\n}\n

To compile, enter the commands

module load gcc/10.2.0\ngcc -pthread -o hello_pthread hello_pthread.c\n

To run the pthread program hello_pthread using the batch system, enter the following shell script in the file hello.sh:

#!/bin/bash -l\n# hello.sh :  execute parallel pthreaded program hello on Slurm\n# command: $ sbatch hello.sh\n# Slurm options use the sentinel #SBATCH\n#SBATCH -J pthread\n#SBATCH -A your_project_name\n#SBATCH -o hello.out\n#\n# request 5 seconds of run time\n#SBATCH -t 00:00:05\n# use openmp programming environment\n# to ensure all processors on the same node\n#SBATCH -p node -n 8\nuname -n\n./hello_pthread\n

The last line in the script is the command used to start the program. Submit the job to the batch queue:

sbatch hello.sh\n

The program's output to stdout is saved in the file named by the -o flag. A test run of the above program yields the following output file:

$ cat hello.out\nr483.uppmax.uu.se\nFrom thread 0 out of 8: hello, world\nFrom thread 4 out of 8: hello, world\nFrom thread 5 out of 8: hello, world\nFrom thread 6 out of 8: hello, world\nFrom thread 7 out of 8: hello, world\nFrom thread 1 out of 8: hello, world\nFrom thread 2 out of 8: hello, world\nFrom thread 3 out of 8: hello, world\n
"},{"location":"software/compiling_serial/","title":"Compiling serial source code","text":"

For parallel programs, see MPI and OpenMP user guide.

"},{"location":"software/compiling_serial/#overview","title":"Overview","text":"Language Compiler Find guide at ... C GCC Compile C using GCC C Intel, icc Compile C using icc C Intel, icx Compile C using icx C++ GCC Compile C++ using GCC C++ Intel, icpc Compile C++ using icpc Fortran GCC Compile Fortran using GCC Fortran Intel, ifort Compile Fortran using ifort Fortran Intel, ifx Compile Fortran using ifx Java javac Compile Java using javac"},{"location":"software/conda/","title":"Conda","text":"Want to see the video 'How to use Conda on Rackham'?

If you want to see a video on how to use Conda on Rackham, go here

"},{"location":"software/conda/#install-packages-or-not-check-it","title":"Install packages or not? Check it","text":""},{"location":"software/conda/#python","title":"Python","text":"
  • Check python versions: module spider python
What does that look like?

It will look similar to this:

[sven@rackham1 ~]$ module spider python\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  python:\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n     Versions:\n        python/2.7.6\n        python/2.7.9\n        python/2.7.11\n        python/2.7.15\n        python/3.3\n        python/3.3.1\n        python/3.4.3\n        python/3.5.0\n        python/3.6.0\n        python/3.6.8\n        python/3.7.2\n        python/3.8.7\n        python/3.9.5\n        python/3.10.8\n        python/3.11.4\n        python/3.11.8\n        python/3.12.1\n     Other possible modules matches:\n        Biopython  Boost.Python  GitPython  IPython  Python  biopython  flatbuffers-python  netcdf4-python  protobuf-python  python-parasail  python3  python_GIS_packages  ...\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*python.*'\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  For detailed information about a specific \"python\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider python/3.12.1\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n
  • load a python version like: module load python/3.10.8
  • from the Python shell with the import command
  • from the BASH shell with
    • the pip list command
    • module help python/3.9.5 (or another version) at UPPMAX
"},{"location":"software/conda/#is-it-not-there-or-is-it-a-stand-alone-tool-then-proceed","title":"Is it not there, or is it a stand-alone tool? Then proceed!**","text":"

Tip Python packages

  • Try Conda first directly on Bianca.
  • Otherwise, on Rackham, try Pip first.
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.
  • If you want to keep the number of files down, use PyPI (pip).
"},{"location":"software/conda/#python-packages-with-pip","title":"Python packages with pip","text":"Want to see the video 'Load and use Python packages on UPPMAX'?

If you want to see a video on how to load and use Python packages on the UPPMAX (and HPC2N) HPC clusters, go here

See the Python user guide

"},{"location":"software/conda/#conda-repositories","title":"Conda repositories","text":"

We have mirrored all major non-proprietary Conda repositories (not main, anaconda and r) directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.

Available Conda channels

  • bioconda
  • biocore
  • conda-forge
  • dranew
  • free
  • main
  • pro
  • qiime2
  • r
  • r2018.11
  • scilifelab-lts
  • nvidia
  • pytorch

More info

  • Installing Conda packages on Bianca
"},{"location":"software/conda/#using-conda","title":"Using Conda","text":"

Conda cheat sheet

  • List all environments: conda info -e or conda env list

  • Create a conda environment (it is good to directly define the packages to include; channels do not need to be explicitly mentioned)

    conda create --prefix /some/path/to/env <package1> [<package2> ... ]\n
    • On our systems the above should replace conda create --name myenvironment ...
  • Create a new environment from requirements.txt:

    • conda create --prefix /some/path/to/env --file requirements.txt
  • Activate a specific environment: conda activate myenvironment

  • List packages in present environment: conda list

    • Also pip list will work
  • Install additional package from an active environment:

    • conda install somepackage
  • Install from certain channel (conda-forge):

    • conda install -c conda-forge somepackage
  • Install a specific version: conda install somepackage=1.2.3

  • Deactivate current environment: conda deactivate

  • More

"},{"location":"software/conda/#your-conda-settings-on-rackham-and-bianca","title":"Your conda settings on Rackham and Bianca","text":"
  • export CONDA_ENVS_PATH=/a/path/to/a/place/in/your/project/

Tip

  • You may want to have the same path for all conda environments in the present project
  • echo \"export CONDA_ENVS_PATH=/a/path/to/a/place/in/your/project/\" >> ~/.bashrc
    • Example: echo \"export CONDA_ENVS_PATH=/proj/<project>/conda\" >> ~/.bashrc

Warning

  • It seems you are required to use this path, ending with the name of your environment, together with --prefix, when you install new environments AND packages, even after activating the conda environment! Like: conda install --prefix $CONDA_ENVS_PATH/<your-environment> ...

Tip

  • REMEMBER TO conda clean -a once in a while to remove unused and unnecessary files
By choice
  • Run source conda_init.sh to initialise your shell (bash) to be able to run conda activate and conda deactivate etcetera instead of source activate. It will modify (append) your .bashrc file.

  • When conda is loaded you will by default be in the base environment, which works in the same way as other Conda environments. It is a "best practice" to avoid installing additional packages into your base environment unless they are very general packages.

"},{"location":"software/conda/#installing-using-conda","title":"Installing using Conda","text":"

We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day. See above for these conda channels.

  • You reach them all by loading the conda module.
  • You don't have to state the specific channel when using UPPMAX.
  • Also, you are offline on Bianca, which means that --offline is the default there; you can specify it yourself on Rackham if you want to simulate the Bianca experience.

Tip

If you need a channel that isn't in our repository, we can easily add it. Just send us a message and we will do it.

"},{"location":"software/conda/#make-a-new-conda-environment","title":"Make a new conda environment","text":"

Tip

  • Since Python and other packages depend on each other, expect that solving the versions will take some time.
  • use an interactive session!
  1. Do module load conda

    • This grants you access to the latest version of Conda and all major repositories on all UPPMAX systems.
    • Check the text output as conda is loaded, especially the first time, see below
  2. Create the Conda environment

    • Example:

      conda create --prefix  $CONDA_ENVS_PATH/python36-env python=3.6 numpy=1.13.1 matplotlib=2.2.2\n

    Info: The mamba alternative is not needed in newer versions of Conda!

    • It all worked if you get something like this:

      # To activate this environment, use\n#\n#     $ conda activate python36-env\n#\n# To deactivate an active environment, use\n#\n#     $ conda deactivate\n
  3. Activate the conda environment by source activate if you have not enabled conda activate, see above:

    source activate python36-env\n
    • You will see that your prompt changes to start with (python36-env), to show that you are within that environment.

    • You can also see the installed packages by:

    conda list\npip list\n
    • you can also add more packages within the environment, either with an exact version (use =) or the latest compatible version (omit =):
    conda install --prefix   $CONDA_ENVS_PATH/python36-env pandas\n
    • that may have given you pandas=1.1.5, which would be the newest version compatible with python=3.6 and numpy=1.13.1
  4. Now do your work!

  5. Deactivate with conda deactivate (this will work in any case!)

(python36-env) $ conda deactivate\n

Warning

  • Conda is known to create many small files. Your diskspace is not only limited in gigabytes, but also in number of files (typically 300000 in $HOME).
  • Check your disk usage and quota limit with uquota
  • Do a conda clean -a once in a while to remove unused and unnecessary files
"},{"location":"software/conda/#working-with-conda-environments-defined-by-files","title":"Working with Conda environments defined by files","text":"
  • Create an environment based on dependencies given in an environment file:

    conda env create --file environment.yml\n
  • Create file from present conda environment:

    conda env export > environment.yml\n

environment.yml (for conda) is a YAML file which looks like this:

name: my-environment\nchannels:        # not needed on bianca\n- defaults\ndependencies:\n- numpy\n- matplotlib\n- pandas\n- scipy\n

environment.yml with versions:

name: my-environment\nchannels:            #not needed on bianca\n- defaults\ndependencies:\n- python=3.7\n- numpy=1.18.1\n- matplotlib=3.1.3\n- pandas=1.1.2\n- scipy=1.6.2\n
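Combining this with the --prefix advice above, you could create the environment from the file like this (a sketch; the path under $CONDA_ENVS_PATH is a hypothetical example):

conda env create --prefix $CONDA_ENVS_PATH/my-environment --file environment.yml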

More on dependencies

  • Dependency management from course Python for Scientific computing

keypoints

  • Conda is an installer of packages but also bigger toolkits

  • Conda on Bianca is easy since the repos in the most used channels are local.

  • Conda creates isolated environments not clashing with other installations of python and other versions of packages

  • A Conda environment requires that you install all needed packages yourself, although this happens automatically.

  • That is, you cannot load the python module and use the packages therein inside your Conda environment.

"},{"location":"software/conda/#conda-in-batch-scripts","title":"Conda in batch scripts","text":"

If you have already set up the CONDA_ENVS_PATH path and run 'conda init bash', a batch script using a conda environment should include:

module load conda\nconda activate <name of environment>\n
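As a more complete sketch of such a batch script (the project name, environment name and Python script are hypothetical placeholders):

#!/bin/bash -l
#SBATCH -A your_project_name
#SBATCH -J conda_job
#SBATCH -t 00:10:00
#SBATCH -p core -n 1
module load conda
conda activate my-environment
python my_analysis.py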
"},{"location":"software/conda/#packages-on-bianca","title":"Packages on Bianca","text":"

Since we have mirrored the conda repositories locally, conda will also work on Bianca!

First try Conda! There is a mirrored repository with many available packages.

If your desired package is not there but is available via pip, follow the guide below, perhaps while looking at the Bianca user guide and the Transit user guide.

Make an installation on Rackham and then use the wharf to copy it over to your directory on Bianca.

Path on Rackham and Bianca could be ~/.local/lib/python<version>/site-packages/.

You may have to do the following.

In the source directory:

cp -a <package_dir> <wharf_mnt_path>\n

you may want to tar before copying to include all possible symbolic links:

$ tar cfz <tarfile.tar.gz> <package>\nand in target directory (wharf_mnt) on Bianca:\n$ tar xfz <tarfile.tar.gz> # if there is a tar file!\n$ mv <file(s)> ~/.local/lib/python<version>/site-packages/\n

If problems arise, send an email to support@uppmax.uu.se and we'll help you.

"},{"location":"software/containers/","title":"Containers","text":"

Containers allow one to bundle installed software into a file, with the goal of running the software on any platform.

  • Docker containers: cannot be run on UPPMAX clusters
  • Singularity containers
"},{"location":"software/cowsay/","title":"cowsay","text":"

cowsay is a tool that is commonly used as a toy example.

Because cowsay is not part of the Linux kernel, users commonly need to install it. Or in our case: load a module to use it.

cowsay (the tool) is part of the identically-named cowsay module.

Finding the module that has cowsay installed:

module spider cowsay\n
What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ module spider cowsay\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  cowsay: cowsay/3.03\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n    This module can be loaded directly: module load cowsay/3.03\n\n    Help:\n       cowsay - use cowsay\n

Loading the latest version of the cowsay module:

module load cowsay/3.03\n

Now you can run cowsay:

cowsay hello\n

results in:

 _______\n< hello >\n -------\n        \\   ^__^\n         \\  (oo)\\_______\n            (__)\\       )\\/\\\n                ||----w |\n                ||     ||\n
"},{"location":"software/cram/","title":"Using CRAM to compress BAM files","text":""},{"location":"software/cram/#introduction","title":"Introduction","text":"

Biological data is being produced at a higher rate each day, and it is a challenge to store it all somewhere.

The bioinformatics community is trying to keep up with the growing data amounts, and new file formats are part of this evolution. The BAM format was a huge success due to its ability to compress aligned reads by ~50-80% of their original size, but even that is not sustainable in the long run.

CRAM is a newer file format (with accompanying tools) that can compress SAM/BAM files even more, which makes it suitable for long-term storage. We think this format will become more common, and that it will be supported by most tools, like the BAM format is today.

There are a couple of options you can give to CRAM that will make it behave differently. Read more about the different options on the developers' homepage.

Lossless compression: When converting BAM -> CRAM -> BAM, the final BAM file will look identical to the initial BAM file.

Lossy compression: You can specify how to deal with the quality scores in a multitude of different ways. To cite the creators of CRAM:

\"Bam2Cram allows to specify lossy model via a string which can be composed of one or more words separated by '-'. Each word is read or base selector and quality score treatment, which can be binning (Illumina 8 bins) or full scale (40 values).

Here are some examples:

  • N40-D8 - preserve quality scores for non-matching bases with full precision, and bin quality scores for positions flanking deletions.
  • m5 - preserve quality scores for reads with mapping quality score lower than 5
  • R40X10-N40 - preserve non-matching quality scores and those matching with coverage lower than 10
  • *8 - bin all quality scores

Selectors:

  • R - bases matching the reference sequence
  • N - aligned bases mismatching the reference; this only applies to 'M', '=' (EQ) or 'X' BAM cigar elements.
  • U - unmapped read
  • Pn - pileup: capture all bases at a given position on the reference if there are at least n mismatches
  • D - read positions flanking a deletion
  • Mn - reads with mapping quality score higher than n
  • mn - reads with mapping quality score lower than n
  • I - insertions
  • * - all

By default no quality scores will be preserved.

Illumina 8-binning scheme:

0, 1, 6, 6, 6, 6, 6, 6, 6, 6, 15, 15, 15, 15, 15, 15, 15, 15, 15,\n15, 22, 22, 22, 22, 22, 27, 27, 27, 27, 27, 33, 33, 33, 33, 33, 37,\n37, 37, 37, 37, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,\n40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,\n40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,\n40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,\n40, 40, 40, 40, 40, 40\"\n

Illumina's white paper on the matter

"},{"location":"software/cram/#compression-rate","title":"Compression rate","text":"

So, how much compression are we talking about here? Here are the results of a test with a 1.9 GB BAM file (7.4 GB SAM format).

CRAM COMPRESSION RATE

File format              File size (GB)
SAM                      7.4
BAM                      1.9
CRAM lossless            1.4
CRAM 8 bins              0.8
CRAM no quality scores   0.26

"},{"location":"software/cram/#examples","title":"Examples","text":""},{"location":"software/cram/#lossless-compression-of-a-bam-file","title":"Lossless compression of a BAM file","text":"

Lossless compression means that the BAM file will be identical before and after compression/decompression. The downside of this is that the produced CRAM file will be larger, since it has to save each and every quality score. To make a lossless compression, use the following command (can also be written as a single line by removing the backslashes):

$ module load bioinfo-tools cramtools\n$ java -jar $CRAM_HOME/cram.jar cram \\\n-I file.bam \\\n-O file.cram \\\n-R ref.fa \\\n--capture-all-tags \\\n--lossless-quality-score\n

The important parts here are:

  • -I which means the input file (name of the BAM file to be compressed).
  • -O which means the output file (name of the new compressed CRAM file).
  • -R which means the reference file (the FASTA reference to be used. Must be the same when decompressing).
  • --capture-all-tags which means that all the tags in the BAM file will be saved.
  • --lossless-quality-score which means the quality scores will be preserved.

CRAM assumes you have indexed your reference genome using e.g. samtools faidx, i.e. that you will have both ref.fa and ref.fa.fai (note the index name: ref.fa.fai, NOT ref.fai).
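As a sketch, the index can be created like this (pick a samtools version with module spider samtools):

module load bioinfo-tools samtools
samtools faidx ref.fa   # creates ref.fa.fai next to ref.fa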

To decompress the CRAM file to a BAM file again, use this command (can also be written as a single line by removing the backslashes):

$ module load bioinfo-tools cramtools\n$ java -jar $CRAM_HOME/cram.jar bam \\\n-I file.cram \\\n-O file.bam \\\n-R ref.fa\n

If you had NM or MD tags in your original BAM file, you have to specify that they should be added in the BAM file that is to be created by adding

--calculate-md-tag\nand/or\n--calculate-nm-tag\n

to the command.

"},{"location":"software/cram/#lossy-compression-of-a-bam-file","title":"Lossy compression of a BAM file","text":"

The motivation to use a lossy compression is that the compression ratio will be much larger, i.e. the cram file will be much smaller. The best compression ratio is reached, naturally, when the quality scores are removed all together. This does have an impact on future analysis such as SNP calling, so the trick is, as usual, to find a good balance.

Illumina has started with a practice called binning. That means that instead of having 40 unique quality scores, you put similar values into bins. Illumina thought 8 bins would get the job done, and that is what CRAM recommends. See this page's introduction for more details about the bins.

To compress your BAM file, binning the quality scores in the same way as Illumina, use this command (can also be written as a single line by removing the backslashes):

$ module load bioinfo-tools cramtools\n$ java -jar $CRAM_HOME/cram.jar cram \\\n-I file.bam \\\n-O file.cram \\\n-R ref.fa \\\n--capture-all-tags \\\n--lossy-quality-score-spec \\*8\n

The important parts here are:

  • -I which means the input file (name of the BAM file to be compressed).
  • -O which means the output file (name of the new compressed CRAM file).
  • -R which means the reference file (the FASTA reference to be used. Must be the same when decompressing.).
  • --capture-all-tags which means that all the tags in the BAM file will be saved.
  • --lossy-quality-score-spec *8 which means the quality scores will be binned into 8 bins the Illumina way. (Notice that we need to apply a "\" before the "*", as your shell (Bash) would otherwise expand this expression if you happened to have any filenames ending with an 8 in the current directory.)

To decompress the CRAM file to a BAM file again, use this command (can also be written as a single line by removing the backslashes):

$ module load bioinfo-tools cramtools\n$ java -jar $CRAM_HOME/cram.jar bam \\\n-I file.cram \\\n-O file.bam \\\n-R ref.fa\n
"},{"location":"software/create_singularity_container/","title":"Creating a Singularity container","text":"

There are many ways to create a Singularity container.

","tags":["Singularity","Singularity container","create","build"]},{"location":"software/create_singularity_container/#how-and-where-to-build","title":"How and where to build?","text":"

Here is a decision tree on how and where to build a Singularity container.

flowchart TD\n  where_to_build[Where to build my Singularity container?]\n  where_to_build --> have_linux\n  have_linux[Do you have Linux with sudo rights and Singularity installed?]\n  build_short[Is the build short?]\n  use_linux(Build on Linux computer with sudo rights)\n  use_remote_builder_website(Build using Sylabs remote builder website)\n  use_remote_builder_rackham(Build using Sylabs remote builder from Rackham)\n\n  have_linux --> |yes| use_linux\n  have_linux --> |no| build_short\n  build_short --> |yes| use_remote_builder_website\n  build_short --> |no| use_remote_builder_rackham
How and where                Features
Local Linux                  Easiest for Linux users, can do longer builds
Remote builder from website  Easiest for non-Linux users, short builds only
Remote builder from Rackham  Can do longer builds
","tags":["Singularity","Singularity container","create","build"]},{"location":"software/create_singularity_container_for_r_package/","title":"Create a Singularity container for an R package","text":"

There are multiple ways to create a Singularity container.

This page shows how to create a Singularity container for an R package.

Although the R_Packages module has thousands of packages, sometimes you need a package from GitHub.

","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_for_r_package/#procedure","title":"Procedure","text":"Prefer a video?

See the video 'Create a Singularity container for an R package on GitHub'

The hardest part of this procedure may be to have Linux with Singularity installed on a computer where you have super-user rights.

The most important thing when creating a Singularity container is to start from a good base container.

","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_for_r_package/#1-create-a-singularity-script","title":"1. Create a Singularity script","text":"

Create a file called Singularity (this is the recommended filename for Singularity scripts) with the following content:

Bootstrap: docker\nFrom: rocker/tidyverse\n\n%post\n    # From https://github.com/brucemoran/Singularity/blob/8eb44591284ffb29056d234c47bf8b1473637805/shub/bases/recipe.CentOs7-R_3.5.2#L21\n    echo 'export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C' >> $SINGULARITY_ENVIRONMENT\n\n    Rscript -e 'install.packages(c(\"remotes\", \"devtools\"))'\n    Rscript -e 'remotes::install_github(\"bmbolstad/preprocessCore\")'\n\n%runscript\nRscript \"$@\"\n

This example script installs the R package hosted on GitHub at https://github.com/bmbolstad/preprocessCore. Replace the R package to suit your needs.

","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_for_r_package/#2-build-the-singularity-container","title":"2. Build the Singularity container","text":"

Here is how you create a Singularity container called my_container.sif from the Singularity script:

sudo singularity build my_container.sif Singularity\n

This builds the container and writes it to the file my_container.sif.
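Because the %runscript forwards its arguments to Rscript, the built container can be used to run an R script directly (a sketch; my_script.R is a hypothetical script):

./my_container.sif my_script.R
# or, equivalently:
singularity run my_container.sif my_script.R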

What does that look like?

Your output will be similar to this:

sven@sven-N141CU:~/temp$ sudo singularity build my_container.sif Singularity \nINFO:    Starting build...\nINFO:    Fetching OCI image...\n307.6MiB / 307.6MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n30.9MiB / 30.9MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n28.2MiB / 28.2MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n261.1MiB / 261.1MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n193.7MiB / 193.7MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n26.3MiB / 26.3MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n288.7KiB / 288.7KiB [================================================================================================================================================] 100 % 0.0 b/s 0s\nINFO:    Extracting OCI image...\nINFO:    Inserting Singularity configuration...\nINFO:    Running post scriptlet\n+ echo export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C\n+ Rscript -e install.packages(c(\"remotes\", \"devtools\"))\nInstalling packages into \u2018/usr/local/lib/R/site-library\u2019\n(as \u2018lib\u2019 is unspecified)\ntrying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/remotes_2.5.0.tar.gz'\nContent type 'binary/octet-stream' length 436043 bytes (425 KB)\n==================================================\ndownloaded 425 KB\n\ntrying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/devtools_2.4.5.tar.gz'\nContent type 'binary/octet-stream' length 435688 bytes (425 KB)\n==================================================\ndownloaded 425 KB\n\n* installing *binary* package \u2018remotes\u2019 ...\n* DONE (remotes)\n* installing *binary* package \u2018devtools\u2019 ...\n* DONE (devtools)\n\nThe downloaded source packages are in\n \u2018/tmp/Rtmpow1CFQ/downloaded_packages\u2019\n+ Rscript -e remotes::install_github(\"bmbolstad/preprocessCore\")\nDownloading GitHub repo bmbolstad/preprocessCore@HEAD\n\u2500\u2500 R CMD build 
\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n\u2714  checking for file \u2018/tmp/Rtmpx5C1XE/remotes13a1238df5bce/bmbolstad-preprocessCore-33ccbd9/DESCRIPTION\u2019 (337ms)\n\u2500  preparing \u2018preprocessCore\u2019:\n\u2714  checking DESCRIPTION meta-information ...\n\u2500  cleaning src\n\u2500  running \u2018cleanup\u2019\n\u2500  checking for LF line-endings in source and make files and shell scripts\n\u2500  checking for empty or unneeded directories\n\u2500  building \u2018preprocessCore_1.61.0.tar.gz\u2019\n\nInstalling package into \u2018/usr/local/lib/R/site-library\u2019\n(as \u2018lib\u2019 is unspecified)\n* installing *source* package \u2018preprocessCore\u2019 ...\n** using staged installation\n'config' variable 'CPP' is defunct\nchecking for gcc... gcc\nchecking whether the C compiler works... yes\nchecking for C compiler default output file name... a.out\nchecking for suffix of executables... \nchecking whether we are cross compiling... no\nchecking for suffix of object files... o\nchecking whether we are using the GNU C compiler... yes\nchecking whether gcc accepts -g... yes\nchecking for gcc option to accept ISO C89... none needed\nchecking how to run the C preprocessor... gcc -E\nchecking for library containing pthread_create... none required\nchecking for grep that handles long lines and -e... /usr/bin/grep\nchecking for egrep... /usr/bin/grep -E\nchecking for ANSI C header files... yes\nchecking for sys/types.h... yes\nchecking for sys/stat.h... yes\nchecking for stdlib.h... yes\nchecking for string.h... yes\nchecking for memory.h... yes\nchecking for strings.h... yes\nchecking for inttypes.h... yes\nchecking for stdint.h... yes\nchecking for unistd.h... yes\nchecking for stdlib.h... (cached) yes\nchecking if PTHREAD_STACK_MIN is defined... yes\nchecking if R is using flexiblas... flexiblas not found. 
preprocessCore threading will not be disabled\nconfigure: Enabling threading for preprocessCore\nconfigure: creating ./config.status\nconfig.status: creating src/Makevars\n** libs\nusing C compiler: \u2018gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\u2019\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_colSummarize.c -o R_colSummarize.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmd_interfaces.c -o R_plmd_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmr_interfaces.c -o R_plmr_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_rlm_interfaces.c -o R_rlm_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 
-DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subColSummarize.c -o R_subColSummarize.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subrcModel_interfaces.c -o R_subrcModel_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg.c -o avg.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg_log.c -o avg_log.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c biweight.c -o biweight.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 
-DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c init_package.c -o init_package.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c lm.c -o lm.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_avg.c -o log_avg.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_median.c -o log_median.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c matrix_functions.c -o matrix_functions.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 
-DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median.c -o median.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median_log.c -o median_log.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c medianpolish.c -o medianpolish.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmd.c -o plmd.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmr.c -o plmr.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 
-DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c psi_fns.c -o psi_fns.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c qnorm.c -o qnorm.o\nqnorm.c: In function \u2018qnorm_c_l\u2019:\nqnorm.c:595:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n  595 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n  596 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c:616:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n  616 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n  617 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c: In function \u2018qnorm_c_determine_target_l\u2019:\nqnorm.c:2004:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n 2004 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n 2005 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c: In function 
\u2018qnorm_c_determine_target_via_subset_l\u2019:\nqnorm.c:2604:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n 2604 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n 2605 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm.c -o rlm.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_anova.c -o rlm_anova.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_se.c -o rlm_se.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_background4.c -o rma_background4.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_common.c -o rma_common.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c weightedkerneldensity.c -o weightedkerneldensity.o\ngcc -shared -L/usr/local/lib/R/lib -L/usr/local/lib -o preprocessCore.so R_colSummarize.o R_plmd_interfaces.o R_plmr_interfaces.o R_rlm_interfaces.o R_subColSummarize.o R_subrcModel_interfaces.o avg.o avg_log.o biweight.o init_package.o lm.o log_avg.o log_median.o matrix_functions.o median.o median_log.o medianpolish.o plmd.o plmr.o psi_fns.o qnorm.o rlm.o rlm_anova.o rlm_se.o rma_background4.o rma_common.o weightedkerneldensity.o -llapack -lblas -lgfortran -lm -lquadmath -L/usr/local/lib/R/lib -lR\ninstalling to /usr/local/lib/R/site-library/00LOCK-preprocessCore/00new/preprocessCore/libs\n** R\n** inst\n** byte-compile and prepare package for lazy loading\n** help\n*** installing help indices\n** building package indices\n** testing if installed package can be loaded from temporary location\n** checking absolute paths in shared objects and dynamic libraries\n** testing if installed package can be loaded from final location\n** testing if installed package keeps a record of temporary installation path\n* DONE (preprocessCore)\nINFO:    Adding runscript\nINFO:    Creating SIF file...\nINFO:    Build complete: my_container.sif\n
","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_for_r_package/#3-create-an-r-script-for-the-container-to-use","title":"3. Create an R script for the container to use","text":"

Here we create an R script for the container to use.

Here is an example R script that prints the contents of the preprocessCore::colSummarizeAvgLog function:

preprocessCore::colSummarizeAvgLog\n

Save this R script, for example, as my_r_script.R.
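
If you prefer to create this file from the command line, a heredoc works too (a minimal sketch; the filename is just a suggestion):

cat > my_r_script.R << 'EOF'\npreprocessCore::colSummarizeAvgLog\nEOF\n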

","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_for_r_package/#4-use-the-singularity-container-on-an-r-script","title":"4. Use the Singularity container on an R script","text":"

Run the container on the R script:

./my_container.sif my_r_script.R\n
What does that look like?

Your output will be similar to this:

sven@sven-N141CU:~/temp$ ./my_container.sif my_r_script.R\nfunction (y) \n{\n    if (!is.matrix(y)) \n        stop(\"argument should be matrix\")\n    if (!is.double(y) & is.numeric(y)) \n        y <- matrix(as.double(y), dim(y)[1], dim(y)[2])\n    else if (!is.numeric(y)) \n        stop(\"argument should be numeric matrix\")\n    .Call(\"R_colSummarize_avg_log\", y, PACKAGE = \"preprocessCore\")\n}\n<bytecode: 0x62d460a4d470>\n<environment: namespace:preprocessCore>\n
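
Equivalently, you can spell out the command the container runs by using singularity exec (assuming, as in the example Singularity script, the runscript simply forwards its arguments to Rscript):

singularity exec my_container.sif Rscript my_r_script.R\n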
","tags":["Singularity","Singularity script","create","build","R package","R"]},{"location":"software/create_singularity_container_from_a_singularity_script/","title":"Create a Singularity container from a Singularity script","text":"

There are multiple ways to create a Singularity container.

This page shows how to create a Singularity container from a Singularity script.

These are the procedures:

| Procedure | Description |
| --- | --- |
| Using a website | Easiest for Mac and Windows users |
| Using a computer with Linux where you have super-user rights | Harder for Mac and Windows users |

Note that users have no super-user rights on our UPPMAX clusters.

","tags":["Singularity","Singularity script","create","build"]},{"location":"software/create_singularity_container_from_a_singularity_script_on_linux/","title":"Create a Singularity container from a Singularity script on a computer with Linux where you have super-user rights","text":"

There are multiple ways to create a Singularity container.

This page shows how to do so using a computer with Linux where you have super-user rights.

Note that users have no super-user rights on our UPPMAX clusters.

","tags":["Singularity","Singularity script","create","build","Linux","local computer"]},{"location":"software/create_singularity_container_from_a_singularity_script_on_linux/#procedure","title":"Procedure","text":"","tags":["Singularity","Singularity script","create","build","Linux","local computer"]},{"location":"software/create_singularity_container_from_a_singularity_script_on_linux/#1-save-the-script-to-a-singularity-file","title":"1. Save the script to a Singularity file","text":"

Save the script as a file called Singularity (this is the recommended filename for Singularity scripts).

Do you have an example Singularity script?

Yes! Here is an example Singularity script:

Bootstrap: docker\nFrom: rocker/tidyverse\n\n%post\n    # From https://github.com/brucemoran/Singularity/blob/8eb44591284ffb29056d234c47bf8b1473637805/shub/bases/recipe.CentOs7-R_3.5.2#L21\n    echo 'export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C' >> $SINGULARITY_ENVIRONMENT\n\n    Rscript -e 'install.packages(c(\"remotes\", \"devtools\"))'\n    Rscript -e 'remotes::install_github(\"bmbolstad/preprocessCore\")'\n\n%runscript\nRscript \"$@\"\n
","tags":["Singularity","Singularity script","create","build","Linux","local computer"]},{"location":"software/create_singularity_container_from_a_singularity_script_on_linux/#2-build-the-singularity-container","title":"2. Build the Singularity container","text":"

Here is how you create a Singularity container called my_container.sif from the Singularity script:

sudo singularity build my_container.sif Singularity\n

This builds the Singularity container and stores it as my_container.sif.

What does that look like?

Your output will be similar to this:

sven@sven-N141CU:~/temp$ sudo singularity build my_container.sif Singularity \nINFO:    Starting build...\nINFO:    Fetching OCI image...\n307.6MiB / 307.6MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n30.9MiB / 30.9MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n28.2MiB / 28.2MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n261.1MiB / 261.1MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n193.7MiB / 193.7MiB [================================================================================================================================================] 100 % 0.0 b/s 0s\n26.3MiB / 26.3MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s\n288.7KiB / 288.7KiB [================================================================================================================================================] 100 % 0.0 b/s 0s\nINFO:    Extracting OCI image...\nINFO:    Inserting Singularity configuration...\nINFO:    Running post scriptlet\n+ echo export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C\n+ Rscript -e install.packages(c(\"remotes\", \"devtools\"))\nInstalling packages into \u2018/usr/local/lib/R/site-library\u2019\n(as \u2018lib\u2019 is unspecified)\ntrying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/remotes_2.5.0.tar.gz'\nContent type 'binary/octet-stream' length 436043 bytes (425 KB)\n==================================================\ndownloaded 425 KB\n\ntrying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/devtools_2.4.5.tar.gz'\nContent type 'binary/octet-stream' length 435688 bytes (425 KB)\n==================================================\ndownloaded 425 KB\n\n* installing *binary* package \u2018remotes\u2019 ...\n* DONE (remotes)\n* installing *binary* package \u2018devtools\u2019 ...\n* DONE (devtools)\n\nThe downloaded source packages are in\n \u2018/tmp/Rtmpow1CFQ/downloaded_packages\u2019\n+ Rscript -e remotes::install_github(\"bmbolstad/preprocessCore\")\nDownloading GitHub repo bmbolstad/preprocessCore@HEAD\n\u2500\u2500 R CMD build 
\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\n\u2714  checking for file \u2018/tmp/Rtmpx5C1XE/remotes13a1238df5bce/bmbolstad-preprocessCore-33ccbd9/DESCRIPTION\u2019 (337ms)\n\u2500  preparing \u2018preprocessCore\u2019:\n\u2714  checking DESCRIPTION meta-information ...\n\u2500  cleaning src\n\u2500  running \u2018cleanup\u2019\n\u2500  checking for LF line-endings in source and make files and shell scripts\n\u2500  checking for empty or unneeded directories\n\u2500  building \u2018preprocessCore_1.61.0.tar.gz\u2019\n\nInstalling package into \u2018/usr/local/lib/R/site-library\u2019\n(as \u2018lib\u2019 is unspecified)\n* installing *source* package \u2018preprocessCore\u2019 ...\n** using staged installation\n'config' variable 'CPP' is defunct\nchecking for gcc... gcc\nchecking whether the C compiler works... yes\nchecking for C compiler default output file name... a.out\nchecking for suffix of executables... \nchecking whether we are cross compiling... no\nchecking for suffix of object files... o\nchecking whether we are using the GNU C compiler... yes\nchecking whether gcc accepts -g... yes\nchecking for gcc option to accept ISO C89... none needed\nchecking how to run the C preprocessor... gcc -E\nchecking for library containing pthread_create... none required\nchecking for grep that handles long lines and -e... /usr/bin/grep\nchecking for egrep... /usr/bin/grep -E\nchecking for ANSI C header files... yes\nchecking for sys/types.h... yes\nchecking for sys/stat.h... yes\nchecking for stdlib.h... yes\nchecking for string.h... yes\nchecking for memory.h... yes\nchecking for strings.h... yes\nchecking for inttypes.h... yes\nchecking for stdint.h... yes\nchecking for unistd.h... yes\nchecking for stdlib.h... (cached) yes\nchecking if PTHREAD_STACK_MIN is defined... yes\nchecking if R is using flexiblas... flexiblas not found. 
preprocessCore threading will not be disabled\nconfigure: Enabling threading for preprocessCore\nconfigure: creating ./config.status\nconfig.status: creating src/Makevars\n** libs\nusing C compiler: \u2018gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0\u2019\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_colSummarize.c -o R_colSummarize.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmd_interfaces.c -o R_plmd_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmr_interfaces.c -o R_plmr_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_rlm_interfaces.c -o R_rlm_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 
-DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subColSummarize.c -o R_subColSummarize.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subrcModel_interfaces.c -o R_subrcModel_interfaces.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg.c -o avg.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg_log.c -o avg_log.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c biweight.c -o biweight.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 
-DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c init_package.c -o init_package.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c lm.c -o lm.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_avg.c -o log_avg.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_median.c -o log_median.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c matrix_functions.c -o matrix_functions.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 
-DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median.c -o median.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median_log.c -o median_log.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c medianpolish.c -o medianpolish.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmd.c -o plmd.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmr.c -o plmr.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 
-DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c psi_fns.c -o psi_fns.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c qnorm.c -o qnorm.o\nqnorm.c: In function \u2018qnorm_c_l\u2019:\nqnorm.c:595:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n  595 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n  596 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c:616:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n  616 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n  617 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c: In function \u2018qnorm_c_determine_target_l\u2019:\nqnorm.c:2004:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n 2004 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n 2005 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\nqnorm.c: In function 
\u2018qnorm_c_determine_target_via_subset_l\u2019:\nqnorm.c:2604:63: warning: format \u2018%d\u2019 expects argument of type \u2018int\u2019, but argument 2 has type \u2018size_t\u2019 {aka \u2018long unsigned int\u2019} [-Wformat=]\n 2604 |          error(\"ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\\n\",\n      |                                                              ~^\n      |                                                               |\n      |                                                               int\n      |                                                              %ld\n 2605 |                i, returnCode, *((int *) status));\n      |                ~                                               \n      |                |\n      |                size_t {aka long unsigned int}\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm.c -o rlm.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_anova.c -o rlm_anova.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_se.c -o rlm_se.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 
-fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_background4.c -o rma_background4.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_common.c -o rma_common.o\ngcc -I\"/usr/local/lib/R/include\" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\\\"\\\" -DPACKAGE_TARNAME=\\\"\\\" -DPACKAGE_VERSION=\\\"\\\" -DPACKAGE_STRING=\\\"\\\" -DPACKAGE_BUGREPORT=\\\"\\\" -DPACKAGE_URL=\\\"\\\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c weightedkerneldensity.c -o weightedkerneldensity.o\ngcc -shared -L/usr/local/lib/R/lib -L/usr/local/lib -o preprocessCore.so R_colSummarize.o R_plmd_interfaces.o R_plmr_interfaces.o R_rlm_interfaces.o R_subColSummarize.o R_subrcModel_interfaces.o avg.o avg_log.o biweight.o init_package.o lm.o log_avg.o log_median.o matrix_functions.o median.o median_log.o medianpolish.o plmd.o plmr.o psi_fns.o qnorm.o rlm.o rlm_anova.o rlm_se.o rma_background4.o rma_common.o weightedkerneldensity.o -llapack -lblas -lgfortran -lm -lquadmath -L/usr/local/lib/R/lib -lR\ninstalling to /usr/local/lib/R/site-library/00LOCK-preprocessCore/00new/preprocessCore/libs\n** R\n** inst\n** byte-compile and prepare package for lazy loading\n** help\n*** installing help indices\n** building package indices\n** testing if installed package can be loaded from temporary location\n** checking absolute paths in shared objects and dynamic libraries\n** testing if installed package can be loaded from final location\n** testing if installed package keeps a record of temporary installation path\n* DONE (preprocessCore)\nINFO:    Adding runscript\nINFO:    Creating SIF file...\nINFO:    Build complete: my_container.sif\n
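
To quickly confirm that the resulting image is valid, you can inspect its metadata (assuming singularity is on your PATH):

singularity inspect my_container.sif\n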
","tags":["Singularity","Singularity script","create","build","Linux","local computer"]},{"location":"software/create_singularity_container_from_a_singularity_script_on_linux/#3-use-the-container","title":"3. Use the container","text":"

How to use a container depends on what it does.

Here are some things to try:

Run the container without arguments, in the hope of getting a clear error message with instructions:

./my_container.sif\n

Run the container in the hope of seeing its documentation:

./my_container.sif --help\n

Run the container on the local folder, in the hope of getting a clear error message with instructions:

./my_container.sif .\n
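
If none of these work, print the container's runscript to see which command it executes:

singularity inspect --runscript my_container.sif\n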
","tags":["Singularity","Singularity script","create","build","Linux","local computer"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/","title":"Create a Singularity container from a Singularity script using a website","text":"

There are multiple ways to create a Singularity container.

This page shows how to do so using a website.

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#procedure","title":"Procedure","text":"","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#1-go-to-to-sylabs-website","title":"1. Go to to Sylabs website","text":"

Go to the Sylabs website.

What does that look like?

The Sylabs website looks similar to this:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#2-got-to-the-sylabs-singularity-container-services-website","title":"2. Got to the Sylabs Singularity Container Services website","text":"

On the Sylabs website, click 'Products | Singularity Container Services'.

Where to click?

Click here:

You will be taken to the 'Singularity Container Services'.

What does that look like?

The Singularity Container Services website looks similar to this:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#3-sign-in-or-sign-up","title":"3. Sign in or sign up","text":"

At the 'Singularity Container Services' website, click 'Sign Up' or 'Sign In'.

What does signing in look like?

Signing in looks similar to this:

You are now logged in at the 'Singularity Container Services':

What does that look like?

The Singularity Container Services looks similar to this after logging in:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#4-go-to-the-remote-builder","title":"4. Go to the remote builder","text":"

Click on 'Remote builder'.

Where to click?

Click here:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#5-setup-the-remote-builder","title":"5. Setup the remote builder","text":"

The remote builder shows a Singularity script and some default settings.

What does that look like?

The remote builder's default settings look similar to this:

Make the following changes:

  • paste your Singularity script in the text box
  • change Repository to a valid name (as indicated), for example, default/my_container
What does that look like?

The remote builder with modified values looks similar to this:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#6-let-the-container-be-built","title":"6. Let the container be built","text":"

Click 'Submit Build'.

Where to click?

Click here:

The build will start.

What does that look like?

A build that has just started looks similar to this:

After a while, the build will be done.

What does that look like?

A build that has finished looks similar to this:

","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#7-download-the-container","title":"7. Download the container","text":"

There are multiple ways to download your Singularity container:

  • Download from the website: click on 'View image', then scroll down and click 'Download'
What does that look like?

Click on 'View image' here:

The 'View image' page looks similar to this:

At the 'View image' page, scroll down to find the 'Download' button:

  • Use singularity pull

For example:

singularity pull library://sven/default/my_container\n
What does that look like?

For example:

$ singularity pull library://pontus/default/sortmerna:3.0.3\nWARNING: Authentication token file not found : Only pulls of public images will succeed\nINFO:    Downloading library image\n 65.02 MiB / 65.02 MiB [=========================================================================================================================================] 100.00% 30.61 MiB/s 2s\n
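
By default, the pulled file is named after the image and tag (in the example above, sortmerna_3.0.3.sif); to choose your own filename, pass it as the first argument:

singularity pull my_container.sif library://sven/default/my_container\n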
","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder/#8-use-the-container","title":"8. Use the container","text":"

How to use a container depends on what it does.

Here are some things to try:

Run the container without arguments, in the hope of getting a clear error message with instructions:

./my_container.sif\n

Run the container in the hope of seeing its documentation:

./my_container.sif --help\n

Run the container on the local folder, in the hope of getting a clear error message with instructions:

./my_container.sif .\n
","tags":["Singularity","Singularity script","create","build","website","Sylabs","remote builder"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/","title":"Create a Singularity container from a Singularity script using a remote build from Rackham","text":"

There are multiple ways to create a Singularity container.

This page shows how to do so using a remote build from Rackham.

","tags":["Singularity","Singularity script","create","build","remote builder","Rackham"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/#building-images-on-rackham","title":"Building images on Rackham","text":"

On Rackham, the singularity capabilities are instead provided by Apptainer. The differences are beyond the scope of this material, but you can safely assume you are working with Singularity. Apptainer also allows you to build containers without sudo/administrative rights: in most cases, you can simply start building directly without sudo, e.g. singularity build myimage.img examples/ubuntu.def. Here are some precautions that will allow you to safely build images on Rackham.

# Change to fit your account\nPRJ_DIR=/crex/uppmax2022-0-00\n\n# Singularity\nexport SINGULARITY_CACHEDIR=${PRJ_DIR}/nobackup/SINGULARITY_CACHEDIR\nexport SINGULARITY_TMPDIR=${PRJ_DIR}/nobackup/SINGULARITY_TMPDIR\nmkdir -p $SINGULARITY_CACHEDIR $SINGULARITY_TMPDIR\n\n# Apptainer\nexport APPTAINER_CACHEDIR=${PRJ_DIR}/nobackup/SINGULARITY_CACHEDIR\nexport APPTAINER_TMPDIR=${PRJ_DIR}/nobackup/SINGULARITY_TMPDIR\nmkdir -p $APPTAINER_CACHEDIR $APPTAINER_TMPDIR\n\n# Disabling cache completely - perfect when you only need to pull containers\n# export SINGULARITY_DISABLE_CACHE=true\n# export APPTAINER_DISABLE_CACHE=true\n
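
With these variables set, cache and temporary files end up in project storage instead of your home directory, and you can start a build directly, reusing the definition file mentioned above:

singularity build myimage.img examples/ubuntu.def\n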
","tags":["Singularity","Singularity script","create","build","remote builder","Rackham"]},{"location":"software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/#procedure","title":"Procedure","text":"

The remote builder service provided by Sylabs also supports remote builds through an API. This means you can call it from the shell at UPPMAX.

Using this service also requires you to register/log in to the Sylabs cloud service. To use this, simply run

singularity remote login SylabsCloud\n

and you should see

Generate an API Key at https://cloud.sylabs.io/auth/tokens, and paste here:\nAPI Key:\n

If you visit that link and give a name, a text token will be created for you. Copy and paste it into the prompt at UPPMAX. You should see

INFO: API Key Verified!\n

Once you've done this, you can go on and build images almost as normal, using commands like

singularity build --remote testcontainer.sif testdefinition.def\n

which will build the container from testdefinition.def remotely and transfer it to your directory, storing it as testcontainer.sif.
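
Once testcontainer.sif has arrived, a quick smoke test confirms that it runs (a minimal check, assuming the image is Linux-based):

singularity exec testcontainer.sif cat /etc/os-release\n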

Could you give an example script?

A sample job script for running a tool provided in a container may look like

#!/bin/bash -l\n#SBATCH -N 1\n#SBATCH -n 1\n#SBATCH -t 0:30:00\n#SBATCH -A your-project\n#SBATCH -p core\ncd /proj/something/containers\n\nsingularity exec ./ubuntu.img echo \"Hey, I'm running ubuntu\"\nsingularity exec ./ubuntu.img lsb_release -a\nsingularity run ./anotherimage some parameters here\n./yetanotherimage parameters\n
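
Save the job script and submit it with sbatch as usual (the filename here is just an example):

sbatch my_job_script.sh\n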
","tags":["Singularity","Singularity script","create","build","remote builder","Rackham"]},{"location":"software/create_singularity_container_from_conda/","title":"Create a Singularity container from conda","text":"

There are multiple ways to create a Singularity container.

This page shows how to create a Singularity container from a Singularity script that uses conda.

As an example we use a script that builds qiime2:

BootStrap: library\nFrom: centos:7\n\n%runscript\n  . /miniconda/etc/profile.d/conda.sh\n  PATH=$PATH:/miniconda/bin\n  conda activate qiime2-2019.7\n  qiime \"$@\"\n\n%post\n  yum clean all\n  yum -y update\n  yum -y install wget python-devel\n  cd /tmp\n  wget https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh\n  bash ./Miniconda2-latest-Linux-x86_64.sh -b -p /miniconda\n  /miniconda/bin/conda update -y conda\n  wget https://data.qiime2.org/distro/core/qiime2-2019.7-py36-linux-conda.yml\n  /miniconda/bin/conda env create -n qiime2-2019.7 --file qiime2-2019.7-py36-linux-conda.yml\n  # OPTIONAL CLEANUP\n  rm qiime2-2019.7-py36-linux-conda.yml\n  /miniconda/bin/conda clean -a\n
","tags":["Singularity","Singularity script","create","build","conda"]},{"location":"software/create_singularity_container_from_docker_pull/","title":"Create a Singularity container from a Docker pull","text":"

There are multiple ways to create a Singularity container.

This page shows how to create a Singularity container from a Docker pull, such as this one (from here)

docker pull lycheeverse/lychee\n
","tags":["Singularity","Singularity script","create","build","Docker","docker pull","pull"]},{"location":"software/create_singularity_container_from_docker_pull/#procedure","title":"Procedure","text":"Prefer a video?

You can see the procedure below in the video Create a Singularity container from docker pull.

The hardest part of this procedure may be to have Linux with Singularity installed on a computer where you have super-user rights.

In this example, we create a Singularity container for lychee, a tool to check for broken links in text files.

","tags":["Singularity","Singularity script","create","build","Docker","docker pull","pull"]},{"location":"software/create_singularity_container_from_docker_pull/#1-create-the-singularity-container","title":"1. Create the Singularity container","text":"

Here we build a Singularity container from a Docker file:

sudo singularity build my_container.sif [location to Docker file]\n

The magic is in [location to Docker file].

In our case, we have seen the documentation state the command docker pull lycheeverse/lychee to install this Docker container. Using a docker pull like this means that the Docker script is on Docker Hub. And yes, our Docker script is on Docker Hub!

To build a Singularity container from a Docker file on Docker Hub, do:

sudo singularity build my_container.sif docker:lycheeverse/lychee\n
","tags":["Singularity","Singularity script","create","build","Docker","docker pull","pull"]},{"location":"software/create_singularity_container_from_docker_pull/#2-use-the-singularity-container","title":"2. Use the Singularity container","text":"
./my_container.sif [your command-line arguments]\n

For example, in this case:

./my_container.sif .\n

The . means 'in this folder'.

In this example, we have created a Singularity container for lychee, a tool that checks for broken links in text files. Hence, the full command can be read as 'Check all files in this folder for broken links'.

","tags":["Singularity","Singularity script","create","build","Docker","docker pull","pull"]},{"location":"software/create_singularity_container_from_dockerhub/","title":"Create a Singularity container from Docker Hub","text":"

There are multiple ways to create a Singularity container.

This page shows how to create a Singularity container from a Docker script on Docker Hub.

","tags":["Singularity","Singularity script","create","build","Docker Hub"]},{"location":"software/create_singularity_container_from_dockerhub/#procedure","title":"Procedure","text":"

The hardest part of this procedure may be to have Linux with Singularity installed on a computer where you have super-user rights.

In this example, we create a Singularity container for https://github.com/lindenb/jvarkit with a Docker Hub script at https://hub.docker.com/r/lindenb/jvarkit.

","tags":["Singularity","Singularity script","create","build","Docker Hub"]},{"location":"software/create_singularity_container_from_dockerhub/#1-create-the-singularity-container","title":"1. Create the Singularity container","text":"

Here we build a Singularity container from a Docker file:

sudo singularity build my_container.sif docker:[owner/file]\n

The magic is in docker:[owner/file], which for us becomes docker:lindenb/jvarkit:

sudo singularity build my_container.sif docker:lindenb/jvarkit\n

In some cases, the Singularity container is now created.

What does that look like?
$ sudo singularity build my_container.sif docker:lindenb/jvarkit\nINFO:    Starting build...\nINFO:    Fetching OCI image...\n28.2MiB / 28.2MiB [================================================================================================================================================] 100 % 2.5 MiB/s 0s\n1.0GiB / 1.0GiB [==================================================================================================================================================] 100 % 2.5 MiB/s 0s\nINFO:    Extracting OCI image...\nINFO:    Inserting Singularity configuration...\nINFO:    Creating SIF file...\nINFO:    Build complete: my_container.sif\n
","tags":["Singularity","Singularity script","create","build","Docker Hub"]},{"location":"software/create_singularity_container_from_dockerhub/#11-troubleshooting","title":"1.1 Troubleshooting","text":"

In our case, however, we get the MANIFEST_UNKNOWN error:

[sudo] password for sven: \nINFO:    Starting build...\nINFO:    Fetching OCI image...\nFATAL:   While performing build: conveyor failed to get: GET https://index.docker.io/v2/lindenb/jvarkit/manifests/latest: MANIFEST_UNKNOWN: manifest unknown; unknown tag=latest\n

This means that Docker Hub cannot determine exactly which Docker script we want to use. To solve this, we need to find a tag that identifies an exact script. On Docker Hub, we can find the tags for our Docker script at https://hub.docker.com/r/lindenb/jvarkit/tags.

What does that page look like?

Here is how https://hub.docker.com/r/lindenb/jvarkit/tags looks:

We can see there that 1b2aedf24 is the tag for the latest version.

sudo singularity build my_container.sif docker:lindenb/jvarkit:1b2aedf24\n
What does that look like?
$ sudo singularity build my_container.sif docker:lindenb/jvarkit:1b2aedf24\nINFO:    Starting build...\nINFO:    Fetching OCI image...\n28.2MiB / 28.2MiB [================================================================================================================================================] 100 % 2.5 MiB/s 0s\n1.0GiB / 1.0GiB [==================================================================================================================================================] 100 % 2.5 MiB/s 0s\nINFO:    Extracting OCI image...\nINFO:    Inserting Singularity configuration...\nINFO:    Creating SIF file...\nINFO:    Build complete: my_container.sif\n

Works!

","tags":["Singularity","Singularity script","create","build","Docker Hub"]},{"location":"software/create_singularity_container_from_dockerhub/#2-use-the-singularity-container","title":"2. Use the Singularity container","text":"
./my_container.sif [your command-line arguments]\n

For example, in this case:

./my_container.sif --help\n

However, this container is set up differently. From the documentation, one finds that this container is used as such:

./jvarkit.sif java -jar /opt/jvarkit/dist/jvarkit.jar --help\n
","tags":["Singularity","Singularity script","create","build","Docker Hub"]},{"location":"software/darsync/","title":"Darsync","text":"

Darsync is a tool used to prepare your project for transfer to Dardel. It has two modes: check mode, where it goes through your files, looks for uncompressed file formats and counts the number of files, and gen mode, where it generates a script file you can submit to Slurm to do the actual data transfer.

The idea is to

  1. Run the check mode and mitigate any problems it finds.
  2. Run the gen mode.
  3. Submit the generated script as a job.
flowchart TD\n  check[Check files]\n  generate[Generate script for transferring files safely]\n  submit[Submit script]\n\n  check --> |no errors| generate\n  check --> |errors that need fixing| check\n  generate --> |no errors| submit

The Darsync workflow

Temporarily add a PATH

Until the darsync script is added to the /sw/uppmax/bin folder you will have to add its location to your PATH variable manually:

export PATH=$PATH:/proj/staff/dahlo/testarea/darsync\n
"},{"location":"software/darsync/#tldr","title":"TLDR","text":"

If you know your way around Linux, here is the short version.

# run check\ndarsync check -l /path/to/dir\n\n# fix warnings on your own\n\n# book a 30 day single core job on Snowy and run the rsync command\nrsync -e \"ssh -i ~/.ssh/id_rsa\" -acPuv /local/path/to/files/ username@dardel.pdc.kth.se:/remote/path/to/files/\n
What does that look like?

Running the temporary export gives no output:

[sven@rackham4 ~]$ export PATH=$PATH:/proj/staff/dahlo/testarea/darsync\n

The folder GitHubs is a folder containing multiple GitHub repositories and is chosen as the test subject:

[sven@rackham4 ~]$ darsync check -l GitHubs/\n\n\n   ____ _   _ _____ ____ _  __\n  / ___| | | | ____/ ___| |/ /\n | |   | |_| |  _|| |   | ' /\n | |___|  _  | |__| |___| . \\\n  \\____|_| |_|_____\\____|_|\\_\\\n\nThe check module of this script will recursivly go through\nall the files in, and under, the folder you specify to see if there\nare any improvments you can to do save space and speed up the data transfer.\n\nIt will look for file formats that are uncompressed, like fasta and vcf files\n(most uncompressed file formats have compressed variants of them that only\ntake up 25% of the space of the uncompressed file).\n\nIf you have many small files, e.g. folders with 100 000 or more files,\nit will slow down the data transfer since there is an overhead cost per file\nyou want to transfer. Large folders like this can be archived/packed into\na single file to speed things up.\nGitHubs/git/scripts\n\n\nChecking completed. Unless you got any warning messages above you should be good to go.\n\nGenerate a Slurm script file to do the transfer by running this script again, but use the 'gen' option this time.\nSee the help message for details, or continue reading the user guide for examples on how to run it.\nhttps://\n\ndarsync gen -h\n\nA file containing file ownership information,\ndarsync_GitHubs.ownership.gz\nhas been created. This file can be used to make sure that the\nfile ownership (user/group) will look the same on Dardel as it does here. See https:// for more info about this.\n
NBIS staff test project code

Follow the project application procedure as described here. Request permission to join project NAISS 2023/22-1027

"},{"location":"software/darsync/#check-mode","title":"Check mode","text":"

To initiate the check mode you run Darsync with the check argument. If you run it without any other arguments it will ask you interactive questions to get the information it needs.

# interactive mode\ndarsync check\n\n# or give it the path to the directory to check directly\ndarsync check -l /path/to/dir\n

The warnings you can get are:

"},{"location":"software/darsync/#too-many-uncompressed-files","title":"Too many uncompressed files","text":"

It looks for files with file endings matching common uncompressed file formats, like .fq, .sam, .vcf, .txt. If the combined file size of these files is above a threshold it will trigger the warning. Most programs that use these formats can also read the compressed versions of them.

Examples of how to compress common formats:

# fastq/fq/fasta/txt\ngzip file.fq\n\n# vcf\nbgzip file.vcf\n\n# sam\nsamtools view -b file.sam > file.bam\n# when the above command is completed successfully:\n# rm file.sam\n

For examples on how to compress other file formats, use an internet search engine to look for

how to compress <insert file format name> file\n
"},{"location":"software/darsync/#too-many-files","title":"Too many files","text":"

If a project consists of many small files it will decrease the data transfer speed, as there is an overhead cost to starting and stopping each file transfer. A way around this is to pack all the small files into a single tar archive, so that it only has to start and stop a single time.

Example of how to pack a folder and all the files in it into a single tar archive:

# pack it\ntar -czvf folder.tar.gz /path/to/folder\n\n# unpack it after transfer\ntar -xzvf folder.tar.gz\n

Once you have mitigated any warnings you got, you are ready to generate the Slurm script that will perform the data transfer.

"},{"location":"software/darsync/#gen-mode","title":"Gen mode","text":"

To generate a transfer script you will need to supply Darsync with some information. Make sure to have this readily available:

  • ID of the UPPMAX project that will run the transfer job, e.g. naiss2099-23-99
    • If you don't remember it, find the name of the project you want to transfer by looking in the list of active projects in SUPR.
  • Path to the folder you want to transfer, e.g. /proj/naiss2099-23-999
    • Either transfer your whole project, or put the files and folders you want to transfer into a new folder in your project folder and transfer that folder.
    • The project's folder on UPPMAX will be located in the /proj/ folder, most likely a folder with the same name as the project's ID, /proj/<project id>, e.g. /proj/naiss2024-23-999. If your project picked a custom directory name when it was created, it will have that name instead of the project ID, e.g. /proj/directory_name. Check which directory name your project has by looking at the project's page in SUPR, in the field called Directory name.
  • Your Dardel username.
    • You can see your Dardel username in SUPR
  • The path on Dardel where you want to put your data, e.g. /cfs/klemming/projects/snic/naiss2099-23-999
    • Check which project ID you have for your project on Dardel in the list of active projects in SUPR.
  • The path to the SSH key you have prepared to be used to login from Rackham to Dardel, e.g. ~/.ssh/id_rsa
    • Check
  • The path to where you want to save the generated transfer script.

To initiate the gen mode you run Darsync with the gen argument. If you run it without any other arguments it will ask you interactive questions to get the information it needs.

# interactive mode\ndarsync gen\n\n\n# or give it any or all arguments directly\ndarsync gen -l /path/to/dir/on/uppmax/ -r /path/to/dir/on/dardel/ -A naiss2099-23-99 -u dardel_username -s ~/.ssh/id_rsa -o ~/dardel_transfer_script.sh\n
"},{"location":"software/darsync/#starting-the-transfer","title":"Starting the transfer","text":"

Before you submit the generated transfer script you should make sure everything is in order. You can try to run the transfer script directly on the UPPMAX login node and see if it starts or if you get any errors:

bash ~/dardel_transfer_script.sh\n

If you start to see progress reports from rsync, you know it works, and you can press ctrl+c to stop.

Example of how it can look when it works:

bash darsync_temp.slurm\nsending incremental file list\ntemp/\ntemp/counts\n             10 100%    0,51kB/s    0:00:00 (xfr#4, to-chk=72/77)\ntemp/export.sh\n             13 100%    0,67kB/s    0:00:00 (xfr#5, to-chk=71/77)\ntemp/my_stuff.py\n             70 100%    3,60kB/s    0:00:00 (xfr#7, to-chk=69/77)\ntemp/run.sh\n             52 100%    2,67kB/s    0:00:00 (xfr#8, to-chk=68/77)\ntemp/sequence_tools.py\n            345 100%   17,73kB/s    0:00:00 (xfr#9, to-chk=67/77)\ntemp/similar_sequences.txt\n             24 100%    1,23kB/s    0:00:00 (xfr#10, to-chk=66/77)\ntemp/t.py\n            328 100%   16,86kB/s    0:00:00 (xfr#11, to-chk=65/77)\n

Example of how it can look when it doesn't work:

bash darsync_temp.slurm\nuser@dardel.pdc.kth.se: Permission denied (publickey,gssapi-keyex,gssapi-with-mic).\nrsync: connection unexpectedly closed (0 bytes received so far) [sender]\nrsync error: unexplained error (code 255) at io.c(231) [sender=3.2.7]\n
"},{"location":"software/darsync/#troubleshooting","title":"Troubleshooting","text":"

Apart from getting the username or paths wrong, we foresee that the most common problems will be getting the SSH keys generated, adding them to the PDC login portal, and adding the UPPMAX IP/hostname as authorized for that SSH key. Please see the PDC user guide on how to set up SSH keys. Once you have your key created and added to the login portal, go to the login portal again and add the address *.uppmax.uu.se to your key to make it work from Rackham.
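
As a minimal sketch (the key type and options here are assumptions; the PDC user guide is the authoritative source), generating the key pair could look like:

# a sketch: creates ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub\nssh-keygen -t rsa -b 4096 -f ~/.ssh/id_rsa\n# the public key (~/.ssh/id_rsa.pub) is what you add to the PDC login portal\n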

"},{"location":"software/darsync/#links","title":"Links","text":"
  • darsync GitHub repository
"},{"location":"software/debuggers/","title":"Debuggers","text":"

There are debugging tools provided with each compiler.

  • ddt: Allinea DDT, a C, C++ and Fortran debugger
  • gdb, the GNU debugger: works well with C, C++, and Fortran programs
  • idb, the Intel debugger (obsolete, use gdb instead)
  • [some other compiler]: works well with Fortran90/95 programs
"},{"location":"software/dnabert2/","title":"DNABERT 2","text":"

DNABERT 2 is 'a foundation model trained on large-scale multi-species genome that achieves the state-of-the-art performance on 28 tasks of the GUE benchmark', according to DNABERT 2.

DNABERT 2 is not part of the UPPMAX module system.

For UPPMAX staff

Notes on installing and running DNABERT 2 on Rackham and Snowy can be found here.

"},{"location":"software/dnabert2/#installing-dnabert-2","title":"Installing DNABERT 2","text":"

Run dnabert2_install_on_rackham.sh.

"},{"location":"software/dnabert2/#running-dnabert-2","title":"Running DNABERT 2","text":"

Run dnabert2_run_on_rackham.sh with the example Python script dnabert2_example.py.

"},{"location":"software/dnabert2/#links","title":"Links","text":"
  • DNABERT 2 GitHub repository
"},{"location":"software/doc/","title":"Software-specific documentation","text":""},{"location":"software/emacs/","title":"Emacs","text":"

UPPMAX has multiple text editors available. This page describes the Emacs text editor.

Emacs is an advanced terminal editor that is fast and powerful, once you learn it.

"},{"location":"software/emacs/#examples-how-to-use-emacs","title":"Examples how to use Emacs","text":"

Start emacs on a terminal with:

emacs\n

Start emacs to edit a file:

emacs filename\n

Start Emacs keeping you in your terminal window:

emacs -nw\n

Do the editing you want, then save with:

Control-x, Control-s\n

Exit emacs with:

Control-x, Control-c\n

You can read a tutorial in emacs by doing:

Control-h t\n
"},{"location":"software/eog/","title":"eog","text":"

eog is a tool to view images on an UPPMAX cluster.

To be able to see the images, either use SSH with X-forwarding or log in to a remote desktop.

Usage:

eog [filename]\n

for example:

eog my.png\n
Need an example image to work with?

In the terminal, do:

convert -size 32x32 xc:transparent my.png\n

This will create an empty PNG image.

What does this look like?

"},{"location":"software/filezilla/","title":"FileZilla","text":"

FileZilla connected to Bianca

FileZilla is a free, open-source and cross-platform tool to transfer files.

  • Transfer file to/from Bianca using FileZilla
  • Transfer file to/from Rackham using FileZilla
  • Transfer file to/from Transit using FileZilla
","tags":["FileZilla","transfer","software"]},{"location":"software/finishedjobinfo/","title":"finishedjobinfo","text":"

finishedjobinfo shows information on jobs that have finished, which is useful to help optimize Slurm jobs.

"},{"location":"software/finishedjobinfo/#usage","title":"Usage","text":"
finishedjobinfo\n
What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ finishedjobinfo\n2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15\n2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56\n2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00\n2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00\n2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14\n2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56\n2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15\n2024-10-08 00:00:43 jobid=50661471 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r441 procs=1 partition=core qos=normal jobname=P8913_277.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r441 timelimit=12:00:00 submit_time=2024-10-07T21:07:30 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:43 runtime=02:49:18 margin=09:10:42 queuetime=00:03:55\n2024-10-08 00:00:58 jobid=50661227 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r387 procs=1 partition=core qos=normal jobname=P8913_264.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r387 timelimit=12:00:00 submit_time=2024-10-07T21:07:24 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:58 runtime=02:49:35 margin=09:10:25 queuetime=00:03:59\n2024-10-08 00:01:00 jobid=50661458 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr18 maxmemory_in_GiB=2.1 
maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:01:00 runtime=02:49:35 margin=09:10:25 queuetime=00:03:56\n
"},{"location":"software/finishedjobinfo/#show-the-help","title":"Show the help","text":"

To show the help of finishedjobinfo, in a terminal, do:

finishedjobinfo -h\n
What does that look like?

[sven@rackham3 ~]$ finishedjobinfo -h\nUsage: finishedjobinfo [-h] [-M cluster_name] [-j jobid[,jobid...]] [-m|-y|-s YYYY-MM-DD[/hhss]] [-e YYYY-MM-DD[/hhss]] [project_or_user]...\n -h  Ask for help\n -M  Request data from a named other cluster\n -j  Request data for a specific jobid or jobids (comma-separated)\n -q  Quiet, quick, abbreviated output (no QOS or memory information)\n -v  Verbose, tells a little more\n -m  Start time is start of this month\n -y  Start time is start of this year\n -s  Request a start time (default is a month back in time)\n -e  Request an end time (default is now)\nTime can also be specified as NOW, TODAY, YYYY, YYYY-MM, YYYY-w, w, hhss, or name of month\n\nMeaning of jobstate:\nCANCELLED  Job was cancelled, before or after it had started\nCOMPLETED  Job run to finish, last command gave exit code 0\nFAILED     Job crashed or at least ended with an exit code that was not 0\nNODE_FAIL  One of your job nodes experienced a major problem, perhaps your job used all available memory\nTIMEOUT    Job exceeded the specified timelimit and was therefore terminated\n

"},{"location":"software/finishedjobinfo/#show-the-information-about-a-specific-job","title":"Show the information about a specific job","text":"

Use finishedjobinfo -j [job_number] to get information about a specific job, where [job_number] is the job number, for example finishedjobinfo -j 44981366.

What does that look like?

Here is an example output:

[sven@rackham3 ~]$ finishedjobinfo -j 44981366\n2024-02-09 12:30:37 jobid=44981366 jobstate=TIMEOUT username=sven account=staff nodes=r35 procs=1 partition=core qos=normal jobname=run_beast2.sh maxmemory_in_GiB=0.1 maxmemory_node=r35 timelimit=00:01:00 submit_time=2024-02-09T12:27:29 start_time=2024-02-09T12:29:18 end_time=2024-02-09T12:30:37 runtime=00:01:19 margin=-00:00:19 queuetime=00:01:49\n


"},{"location":"software/finishedjobinfo/#how-do-i-find-jobs-that-have-finished-and-took-longer-than-an-hour-and-less-than-a-day","title":"How do I find jobs that have finished and took longer than an hour and less than a day?","text":"
finishedjobinfo | grep \"runtime.[0-9][1-9]\"\n

Press CTRL-C to stop the process: otherwise it will take a very long time to finish.

What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ finishedjobinfo | grep \"runtime.[0-9][1-9]\"\n2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15\n2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56\n2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00\n2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00\n2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14\n2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56\n2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15\n

This output took around 1 second to produce.

"},{"location":"software/finishedjobinfo/#how-do-i-find-jobs-that-have-finished-and-took-longer-than-an-hour","title":"How do I find jobs that have finished and took longer than an hour?","text":"
finishedjobinfo | grep -E \"runtime.([0-9]-)?[0-9][1-9]\"\n

Press CTRL-C to stop the process: otherwise it will take a very long time to finish.

What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ finishedjobinfo | grep -E \"runtime.([0-9]-)?[0-9][1-9]\"\n2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15\n2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56\n2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00\n2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00\n2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14\n2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56\n2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15\n

This output took around 1 second to produce.

"},{"location":"software/finishedjobinfo/#how-do-i-find-jobs-that-have-finished-and-took-longer-than-a-day","title":"How do I find jobs that have finished and took longer than a day?","text":"
finishedjobinfo | grep \"runtime.[0-9]-\"\n

Press CTRL-C to stop the process: otherwise it will take a very long time to finish.

What does that look like?

Your output will look similar to this:

[sven@rackham1 ~]$ finishedjobinfo | grep \"runtime.[0-9]-\"\n2024-10-08 00:01:18 jobid=50597318 jobstate=COMPLETED username=nikolay account=naiss2024-22-35 nodes=r356 procs=20 partition=node qos=normal jobname=168011 maxmemory_in_GiB=5.3 maxmemory_node=r356 timelimit=10-00:00:00 submit_time=2024-10-02T10:36:59 start_time=2024-10-06T21:05:31 end_time=2024-10-08T00:01:18 runtime=1-02:55:47 margin=8-21:04:13 queuetime=4-10:28:32\n2024-10-08 00:21:55 jobid=50597286 jobstate=COMPLETED username=nikolay account=naiss2024-22-35 nodes=r432 procs=20 partition=node qos=normal jobname=1578718 maxmemory_in_GiB=5.3 maxmemory_node=r432 timelimit=10-00:00:00 submit_time=2024-10-02T10:36:10 start_time=2024-10-06T14:32:36 end_time=2024-10-08T00:21:55 runtime=1-09:49:19 margin=8-14:10:41 queuetime=4-03:56:26\n

This output took 30 seconds to produce as there were few jobs at that time that took longer than a day to finish.

"},{"location":"software/games_us/","title":"GAMESS-US user guide","text":"

GAMESS-US version 20170930 is installed on Rackham. Newer versions can be installed on request to UPPMAX support. Snowy currently lacks GAMESS-US.

"},{"location":"software/games_us/#running-gamess","title":"Running GAMESS","text":"

Load the module using

module load gamess/20170930\n

Below is an example submit script for Rackham, running on 40 cores (2 nodes with 20 cores each). It is essential to specify the project name:

#!/bin/bash -l\n#SBATCH -J jobname\n#SBATCH -p node -n 40\n#SBATCH -A PROJECT\n#SBATCH -t 03:00:00\n\nmodule load gamess/20170930\n\nrungms gms >gms.out\n
"},{"location":"software/games_us/#memory-specification","title":"Memory specification","text":"

GAMESS uses two kinds of memory: replicated memory and distributed memory. Both kinds of memory should be given in the $SYSTEM specification. Replicated memory is specified using the MWORDS keyword and distributed memory with the MEMDDI keyword. It is very important that you understand the uses of these keywords. Check the GAMESS documentation for further information.

If your job requires 16 MW (mega-words) of replicated memory and 800 MW of distributed memory, as in the example below, the memory requirement per CPU core varies as 16 + 800/N, where N is the number of cores. Each word is 8 bytes of memory, so the amount of memory per core is (16 + 800/N) * 8 MB. The amount of memory per node depends on the number of cores per node. Rackham has 20 cores per node; most nodes have 128 GB of memory, but 30 nodes have 512 GB and 4 nodes have 1 TB.
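
As a worked sketch of this arithmetic (using just the numbers from the example above), for N = 20 cores:

# replicated 16 MW + distributed 800 MW shared over N = 20 cores, 8 bytes per word\n# (16 + 800/20) * 8 = 448 MB per core\necho $(( (16 + 800/20) * 8 ))   # prints 448\n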

"},{"location":"software/games_us/#communication","title":"Communication","text":"

For intra-node communication shared memory is used. For inter-node communication, MPI is used over the InfiniBand interconnect.

"},{"location":"software/games_us/#citing-gamess-papers","title":"Citing GAMESS papers","text":"

It is essential that you read the GAMESS manual thoroughly to properly reference the papers specified in the instructions. All publications using GAMESS should cite at least the following paper:

@Article{GAMESS,\nauthor={M.W.Schmidt and K.K.Baldridge and J.A.Boatz and S.T.Elbert and\nM.S.Gordon and J.J.Jensen and S.Koseki and N.Matsunaga and\nK.A.Nguyen and S.Su and T.L.Windus and M.Dupuis and J.A.Montgomery},\njournal={J.~Comput.~Chem.},\nvolume=14,\npages={1347},\nyear=1993,\ncomment={The GAMESS program}}\n

If you need to obtain GAMESS yourself, please visit the GAMESS website for further instructions.

"},{"location":"software/gaussian/","title":"Gaussian 09 user guide","text":"

A short guide on how to run g09 on UPPMAX.

"},{"location":"software/gaussian/#access-to-gaussian-09","title":"Access to Gaussian 09","text":"

Gaussian 09 is available at UPPMAX. Uppsala University has a university license for all employees. If you want to be able to run g09, email support@uppmax.uu.se and ask to be added to the g09 group.

"},{"location":"software/gaussian/#running-g09","title":"Running g09","text":"

In order to run g09 you must first set up the correct environment. You load the module with:

module load gaussian/g09.d01\n
"},{"location":"software/gaussian/#running-single-core-jobs-in-slurm","title":"Running single core jobs in Slurm","text":"

Here is an example of a submit script for Slurm:

#!/bin/bash -l\n#SBATCH -J g09test\n#SBATCH -p core\n#SBATCH -n 1\n#If you ask for a single core in slurm on Rackham you get 6.4 Gb of memory\n#SBATCH -t 1:00:00\n#SBATCH -A your_project_name\n\nmodule load gaussian/g09.d01\ng09 mp2.inp mp2.out\n

If you run a single core job on Rackham, you can't use more than 6.4 GB of memory.

When specifying the memory requirements, make sure that you ask for some more memory in the submit-script than in g09 to allow for some memory overhead for the program. As a general rule you should ask for 200MB more than you need in the calculation.
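
For example (a sketch; the %Mem value below is an assumption, not taken from this page): if your input asks Gaussian for %Mem=6000MB, a single Rackham core leaves only about 400 MB of overhead, so request two cores to get more headroom:

#If your input has %Mem=6000MB, one core (6.4 GB) leaves only ~400 MB overhead\n#SBATCH -p core\n#SBATCH -n 2\n#2 cores x 6.4 GB = 12.8 GB available to the job\n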

The mp2.inp input file in the example above:

%Mem=800MB\n#P MP2 aug-cc-pVTZ OPT\n\ntest\n\n0 1\nLi\nF 1 1.0\n
"},{"location":"software/gaussian/#scratch-space","title":"Scratch space","text":"

The g09 module sets the environment variable GAUSS_SCRDIR to /scratch/$SLURM_JOBID in Slurm. These directories are removed after the job is finished.

If you want to set GAUSS_SCRDIR yourself, you must do it after module load gaussian/g09.d01 in your script.

If you set GAUSS_SCRDIR to something else in your submit script remember to remove all unwanted files after your job has finished.

If you think you will use a large amount of scratch space, you might want to set maxdisk in your input file. You can either set maxdisk directly on the command line in your input file:

#P MP2 aug-cc-pVTZ SCF=Tight maxdisk=170GB\n

or you can put something like:

MAXDISK=$( df | awk '/scratch/ { print $4 }' )KB\nsed -i '/^#/ s/ maxdisk=[[:digit:]]*KB//' inputfile\nsed -i '/^#/ s/$/ maxdisk='$MAXDISK'/' inputfile\n

in your scriptfile. This will set maxdisk to the currently available size of the /scratch disk on the node you will run on. Read more on maxdisk in the online manual.

"},{"location":"software/gaussian/#running-g09-in-parallel","title":"Running g09 in parallel","text":"

Gaussian can be run in parallel on a single node using shared memory. This is the input file for the slurm example below:

The dimer4.inp input:

%Mem=3800MB\n%NProcShared=4\n#P MP2 aug-cc-pVTZ SCF=Tight\n\nmethanol dimer MP2\n\n0 1\n6 0.754746 -0.733607 -0.191063\n1 -0.033607 -1.456810 -0.395634\n1 1.007890 -0.778160 0.867678\n1 1.635910 -0.998198 -0.774627\n8 0.317192 0.576306 -0.534002\n1 1.033100 1.188210 -0.342355\n6 1.513038 3.469264 0.971885\n1 1.118398 2.910304 1.819367\n1 0.680743 3.818664 0.361783\n1 2.062618 4.333044 1.344537\n8 2.372298 2.640544 0.197416\n1 2.702458 3.161614 -0.539550\n
"},{"location":"software/gaussian/#running-g09-in-parallel-in-slurm","title":"Running g09 in parallel in slurm","text":"

This can be done by asking for CPUs on the same node using the parallel node environments and telling Gaussian to use several CPUs using the NProcShared link 0 command.

An example submit-script:

#!/bin/bash -l\n#SBATCH -J g09_4\n#SBATCH -p node -n 8\n#SBATCH -t 1:00:00\n#SBATCH -A your_project_name\n\nmodule load gaussian/g09.d01\nexport OMP_NUM_THREADS=1\nulimit -s $STACKLIMIT\n\ng09 dimer4.inp dimer4.out\n

Notice that 8 cores are requested from the queue system using the line #SBATCH -p node -n 8 and that Gaussian is told to use 4 cores with the link 0 command %NProcShared=4. The example above runs about 1.7 times as fast on eight cores as on four; just change %NProcShared=4 to %NProcShared=8 in the input file. Please benchmark your own inputs, as the speedup depends heavily on the method and the size of the system. In some cases Gaussian cannot use all the CPUs you ask for. This is indicated in the output with lines looking like this:

PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.

The reason for specifying OMP_NUM_THREADS=1 is to not use the OpenMP parts of the Gaussian code, but to use Gaussian's own threads.

"},{"location":"software/gaussian/#running-g09-in-parallel-with-linda","title":"Running g09 in parallel with linda","text":"

In order to run g09 in parallel over several nodes we have acquired Linda TCP.

"},{"location":"software/gaussian/#running-g09-in-parallel-with-linda-in-slurm","title":"Running g09 in parallel with linda in slurm","text":"

This can be done by asking for CPUs on the same node using the parallel node environments and telling Gaussian to use several CPUs using the NProcLinda and NProcShared link 0 command.

An example submit-script:

#!/bin/bash -l\n#SBATCH -J g09-linda\n#\n#SBATCH -t 2:00:0\n#\n#SBATCH -p node -n 40\n#SBATCH -A your_project_name\n\nmodule load gaussian/g09.d01\nulimit -s $STACKLIMIT\nexport OMP_NUM_THREADS=1\n\n#Next lines are there for linda to know what nodes to run on\nsrun hostname -s | sort -u > tsnet.nodes.$SLURM_JOBID\n#Use double quotes so that $SLURM_JOBID is expanded\nexport GAUSS_LFLAGS=\"-nodefile tsnet.nodes.$SLURM_JOBID -opt \\\"Tsnet.Node.lindarsharg: ssh\\\"\"\n\n#export GAUSS_SCRDIR=\ntime g09 dimer20-2.inp dimer20-2.out\n\nrm tsnet.nodes.$SLURM_JOBID\n

Here is the input file:

%NProcLinda=2\n%NProcShared=20\n%Mem=2800MB\n#P MP2 aug-cc-pVTZ SCF=Tight\n\nmethanol dimer MP2\n\n0 1\n6 0.754746 -0.733607 -0.191063\n1 -0.033607 -1.456810 -0.395634\n1 1.007890 -0.778160 0.867678\n1 1.635910 -0.998198 -0.774627\n8 0.317192 0.576306 -0.534002\n1 1.033100 1.188210 -0.342355\n6 1.513038 3.469264 0.971885\n1 1.118398 2.910304 1.819367\n1 0.680743 3.818664 0.361783\n1 2.062618 4.333044 1.344537\n8 2.372298 2.640544 0.197416\n1 2.702458 3.161614 -0.539550\n

Notice that 40 cores are requested from the queue system using the line #SBATCH -p node -n 40 and that g09 is told to use 2 nodes via Linda with the %NProcLinda=2 link 0 command, and 20 cores on each node with the link 0 command %NProcShared=20.

Please benchmark your own inputs as the speedup depends heavily on the method and size of system.

In some cases Gaussian cannot use all the CPUs you ask for. This is indicated in the output with lines looking like this:

PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.\n
"},{"location":"software/gaussian/#number-of-cpus-on-the-shared-memory-nodes","title":"Number of CPUs on the shared memory nodes","text":"

Use the information below as a guide to how many CPUs to request for your calculation:

"},{"location":"software/gaussian/#on-rackham","title":"On Rackham","text":"
  • 272 nodes with two 10-core CPUs and 128GB memory
  • 32 nodes with two 10-core CPUs and 256GB memory
"},{"location":"software/gaussian/#on-milou","title":"On Milou","text":"
  • 174 nodes with two 8-core CPUs and 128GB memory
  • 17 nodes with two 8-core CPUs and 256GB memory
  • 17 nodes with two 8-core CPUs and 512GB memory
"},{"location":"software/gaussian/#note-on-chk-files","title":"Note on chk-files","text":"

You may experience difficulties if you mix different versions (g09 and g03) or revisions of Gaussian. If you use a checkpoint file (.chk file) from an older revision (say g03 e.01) in a new calculation with revision a.02, g09 may not run properly.

We recommend using the same revision if you want to restart a calculation or reuse an older checkpoint file.
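
When you do reuse a checkpoint, the checkpoint file is named with the %Chk link 0 command at the top of the input file; a sketch, where yourjob.chk is a placeholder name:

%Chk=yourjob.chk\n%Mem=800MB\n#P MP2 aug-cc-pVTZ OPT\n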

"},{"location":"software/gcc/","title":"GCC/gcc","text":"

GCC is shorthand for 'GNU Compiler Collection', a collection of compilers, where gcc is the name of the actual program.

gcc is part of the gcc module.

How do I find the gcc module?

Like you'd find any module:

module spider gcc\n
Which versions does the gcc module have?

Like you'd find the version of any module:

module spider gcc\n

This will look similar to this:

[sven@rackham2 ~]$ module spider gcc\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  gcc:\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n     Versions:\n        gcc/4.2.3\n        gcc/4.3.0\n        gcc/4.4\n        gcc/4.8.2\n        gcc/4.8.3\n        gcc/4.9.2\n        gcc/4.9.4\n        gcc/5.2.0\n        gcc/5.3.0\n        gcc/5.4.0\n        gcc/5.5.0\n        gcc/6.1.0\n        gcc/6.2.0\n        gcc/6.3.0\n        gcc/6.4.0\n        gcc/7.1.0\n        gcc/7.2.0\n        gcc/7.3.0\n        gcc/7.4.0\n        gcc/8.1.0\n        gcc/8.2.0\n        gcc/8.3.0\n        gcc/8.4.0\n        gcc/9.1.0\n        gcc/9.2.0\n        gcc/9.3.0\n        gcc/10.1.0\n        gcc/10.2.0\n        gcc/10.3.0\n        gcc/11.2.0\n        gcc/11.3.0\n        gcc/12.1.0\n        gcc/12.2.0\n        gcc/12.3.0\n        gcc/13.1.0\n        gcc/13.2.0\n        gcc/13.3.0\n        gcc/14.1.0\n     Other possible modules matches:\n        GCC  GCCcore  gcccuda\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*gcc.*'\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  For detailed information about a specific \"gcc\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider gcc/14.1.0\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n

The GCC can be used to:

  • Compile a C program
  • Compile a C++ program
  • Compile a Fortran program

Working together with GCC are:

  • A debugger called gdb
  • A profiler called gprof
  • A general profiler called Valgrind
"},{"location":"software/gcc_compile_c/","title":"Compile C using GCC","text":"

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

This page describes how to compile C code using the GCC.

"},{"location":"software/gcc_compile_c/#procedure","title":"Procedure","text":""},{"location":"software/gcc_compile_c/#0-create-a-c-source-file","title":"0. Create a C source file","text":"

You will need C code to work on.

In this optional step, a file with a minimal C program is created.

Create and write a C source file called hello_world.c:

nano hello_world.c\n

In nano, write the C program as such:

#include <stdio.h>\n\nint main() {\n  printf(\"hello, world\\n\");\n}\n
"},{"location":"software/gcc_compile_c/#1-load-a-gcc-module","title":"1. Load a GCC module","text":"

Load a recent GCC module:

module load gcc/13.2.0\n
Do I really need to load a module?

No, as there is a system-installed GCC.

For the sake of doing reproducible research, always load a module of a specific version.

If you need the C11 or C17 standards, use these module versions or newer:

Module version and description: gcc/4.8 fully implements the C11 standard; gcc/8 fully implements the C17 standard."},{"location":"software/gcc_compile_c/#2-compile-the-source-file","title":"2. Compile the source file","text":"

After saving and closing nano, compile as such:

gcc hello_world.c\n

This compiles the file hello_world.c using all defaults:

  • default/no optimization
  • the executable created is called a.out

To compile the file hello_world.c with run-time speed optimization and create an executable with a more sensible name, use:

gcc -O3 -o hello_world hello_world.c\n
  • -O3: optimize for run-time speed
  • -o hello_world: the executable created is called hello_world
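
If you need a specific C standard (see the note on module versions above), you can also pass it explicitly with -std; a sketch, assuming the loaded gcc supports that standard:

gcc -std=c17 -O3 -o hello_world hello_world.c\n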
"},{"location":"software/gcc_compile_c/#3-run","title":"3. Run","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/gcc_compile_cpp/","title":"Compile C++ using GCC","text":"

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

This page describes how to compile C++ code using the GCC.

"},{"location":"software/gcc_compile_cpp/#procedure","title":"Procedure","text":""},{"location":"software/gcc_compile_cpp/#0-create-a-c-source-file","title":"0. Create a C++ source file","text":"

You will need C++ code to work on.

In this optional step, a file with a minimal C++ program is created.

Create and write a C++ source file called hello_world.cpp:

nano hello_world.cpp\n

In nano, write the C++ program as such:

#include <iostream>\n\nint main()\n{\n  std::cout << \"hello, world\\n\";\n}\n
"},{"location":"software/gcc_compile_cpp/#1-load-a-gcc-module","title":"1. Load a GCC module","text":"

Load a recent GCC module:

module load gcc/13.2.0\n
Do I really need to load a module?

No, as there is a system-installed GCC.

For the sake of doing reproducible research, always load a module of a specific version.

"},{"location":"software/gcc_compile_cpp/#2-compile-the-source-file","title":"2. Compile the source file","text":"

After saving and closing nano, compile as such:

g++ hello_world.cpp\n

This compiles the file hello_world.cpp using all defaults:

  • default/no optimization
  • the executable created is called a.out

To compile the file hello_world.cpp with run-time speed optimization and create an executable with a more sensible name, use:

g++ -O3 -o hello_world hello_world.cpp\n
  • -O3: optimize for run-time speed
  • -o hello_world: the executable created is called hello_world
"},{"location":"software/gcc_compile_cpp/#3-run","title":"3. Run","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/gcc_compile_fortran/","title":"Compile Fortran using GCC","text":"

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

This page describes how to compile Fortran code using the GCC.

"},{"location":"software/gcc_compile_fortran/#procedure","title":"Procedure","text":""},{"location":"software/gcc_compile_fortran/#0-create-a-fortran-source-file","title":"0. Create a Fortran source file","text":"

You will need Fortran code to work on.

In this optional step, a file with a minimal Fortran program is created.

Create and write a Fortran source file called hello_world.f:

nano hello_world.f\n

In nano, write the Fortran program as such:

C     HELLO.F :  PRINT MESSAGE ON SCREEN\n      PROGRAM HELLO\n      WRITE(*,*) \"hello, world\";\n      END\n
"},{"location":"software/gcc_compile_fortran/#1-load-a-gcc-module","title":"1. Load a GCC module","text":"

Load a recent GCC module:

module load gcc/13.2.0\n
Do I really need to load a module?

No, as there is a system-installed GCC.

For the sake of doing reproducible research, always load a module of a specific version.

"},{"location":"software/gcc_compile_fortran/#2-compile-the-source-file","title":"2. Compile the source file","text":"

After saving and closing nano, compile as such:

gfortran hello_world.f\n

This compiles the file hello_world.f using all defaults:

  • default/no optimization
  • the executable created is called a.out

To compile the file hello_world.f with run-time speed optimization and create an executable with a more sensible name, use:

gfortran -Ofast -o hello_world hello_world.f\n
  • -Ofast: optimize for run-time speed, similar to -O3
  • -o hello_world: the executable created is called hello_world
"},{"location":"software/gcc_compile_fortran/#3-run","title":"3. Run","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/gdb/","title":"gdb","text":"

There are many debuggers. This page described gdb, the GNU debugger.

gdb is a debugger provided with the GNU compilers. It works fine for C, C++ and Fortran. With older versions there were problems with Fortran 90/95.

  • Debugging GCC-compiled programs
  • Debugging Intel-compiled programs
"},{"location":"software/gdb/#debugging-gcc-compiled-programs","title":"Debugging GCC-compiled programs","text":"

In order to use gdb, do the following. First, load a recent gcc module and a gdb module (the system gdb is from 2013!):

module load gcc/10.3.0 gdb/11.2\n

Then compile your program with flags for debugging added, e.g. -ggdb:

gcc -ggdb your-program.c -o your-program\n

Finally, run the gdb program:

gdb your-program\n

Then you can use the gdb commands, like run, break, step, help, ...
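
A minimal session sketch (my_variable is a hypothetical name from your own program):

(gdb) break main\n(gdb) run\n(gdb) next\n(gdb) print my_variable\n(gdb) continue\n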

Exit with Ctrl+D.

"},{"location":"software/gdb/#debugging-intel-compiled-programs","title":"Debugging Intel-compiled programs","text":"

In order to use gdb with Intel-compiled programs, do the following:

Load the icc module

module load intel/20.4\n

Compile your program with flags for debugging added, e.g. -g

icc -g your-program.c -o your-program\n

Run the gdb program:

gdb your-program\n

Then you can use the gdb commands, like run, break, step, help, ...

Exit with Ctrl+D.

"},{"location":"software/gedit/","title":"gedit","text":"

There are many text editors installed on the UPPMAX systems. gedit is one of these.

gedit has a graphical user interface and is included within MobaXterm.

"},{"location":"software/git_on_bianca/","title":"Git on Bianca","text":"

NOTE: This guide assumes you know basic git commands and will not cover how to use git as a tool.

  • One of the security features of Bianca is that there is no internet access from the cluster.
  • This makes it a bit more complicated to use things like Git to collaborate on files.
  • In this guide we will cover two use-cases:

    1. collaborate with other users within the same Bianca project, and
    2. collaborate with other users using Github.
"},{"location":"software/git_on_bianca/#within-the-same-bianca-project","title":"Within the same Bianca project","text":"

Usually an external service like GitHub is used to host a remote repository (repo) that everyone pushes and pulls from. Since we don't have an internet connection on Bianca, we have to push and pull from a location within your Bianca project. Luckily that is simple to set up with git.

To create your own remote repo that everyone will push and pull from, create an empty directory somewhere in your project folder, go into it and initialize the repo.

# go to project dir\ncd /proj/nobackup/sens2023999/\n\n# create dir\nmkdir my_repo.git\n\n# go into dir\ncd my_repo.git\n\n# init repo\ngit init --bare --share=group\n

The name of the created directory doesn't have to end with .git, but it is good for us humans to indicate that this is a repo people will use to push and pull from, and not where you will manually edit files.

To start using this repo you will clone it just like you would clone a GitHub repo.

# go to where you want to clone the repo, e.g. your home\ncd ~/\n\n# clone it\ngit clone /proj/nobackup/sens2023999/my_repo.git\n\n# add a file and make the first commit\necho \"# my_repo\" >> README.md\ngit add README.md\ngit commit -m \"first commit\"\ngit branch -M main\ngit push -u origin main\n

Now you will have a new directory named my_repo that only has a README.md file, and you can start creating other files in there. From this point onwards git will work the same way as if you were using a GitHub hosted repo to collaborate. Once you have pushed your files the others in your project can clone the repo and start pushing and pulling their changes.

"},{"location":"software/git_on_bianca/#using-github-or-any-other-git-hosting-service","title":"Using Github (or any other git hosting service)","text":"

These instructions will work with any git hosting provider, like GitLab or Bitbucket, but we'll use GitHub in the examples.

In the examples we use Rackham to mount the wharf directory. This is not the only way to do it. If you'd rather use an sftp client to transfer your files from the outside of Bianca to and from the wharf, it will work just as well.

"},{"location":"software/git_on_bianca/#cloning-and-pulling-only","title":"Cloning and pulling only","text":"

If you only want to run someone else's software that they have stored in a GitHub repo, you only need to clone the repo to be able to use it. Since you are only a user of the software there is no need to be able to push to the repo. If there are any updates to the repo you only need to pull the repo to get them.

The way to do this on Bianca is to simply clone the repo on a computer with internet access, move it to the Bianca wharf, and then copy it to its final destination on Bianca. If there are updates to the repo that you want, you move the repo back to the wharf, pull the updates in the mounted wharf directory on Rackham, and then move the directory back to its final destination on Bianca.

### on rackham ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# mount the wharf directory\nmkdir -p ~/wharf_mnt\n/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt\n\n# clone the repo to the wharf directory\ncd ~/wharf_mnt\ngit clone git@github.com:example/example.git\n\n### on bianca ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# move the directory to its final destination on Bianca\nmv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/example/ /proj/$PROJ/\n

If there are any updates to the software you might want to pull the changes from GitHub.

### on bianca ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# move the directory you cloned from GitHub back to the wharf\nmv /proj/$PROJ/example/ /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/\n\n### on rackham ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# mount the wharf directory\nmkdir -p ~/wharf_mnt\n/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt\n\n# pull the updates\ncd ~/wharf_mnt/example\ngit pull\n\n### on bianca ###\n\n# move the directory to its final destination on Bianca\nmv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/example/ /proj/$PROJ/\n
"},{"location":"software/git_on_bianca/#pushing-and-pulling","title":"Pushing and pulling","text":"

If you are a collaborator on a software project, you will need to both pull from and push to the repo.

The general approach to using git as a collaborator with GitHub on Bianca is:

  1. On Bianca: make a backup of your code directory.
  2. On Bianca: move the entire code directory to the wharf folder.
  3. On Rackham: mount the wharf directory.
  4. On Rackham: change the git remote URL to GitHub\u2019s URL.
  5. On Rackham: pull and push from GitHub.
  6. On Bianca: move the directory from the wharf back to your project.
  7. On Bianca: change the git remote URL back to your local Bianca repo.
  8. On Bianca: push any changes you got from GitHub to your local Bianca repo.

The best way to show this is with an example:

### on bianca ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# make a copy of your code dir, delete this later if all goes well :)\ncp -ar /proj/$PROJ/code_dir /proj/$PROJ/code_dir.$(date +%Y-%m-%d)\n\n# move the directory with your code to the wharf\nmv /proj/$PROJ/code_dir/ /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/\n\n### on rackham ###\n\n# set variables for readability\nPROJ=sens2023999\nUNAME=youruppmaxusername\n\n# mount the wharf folder\nmkdir -p ~/wharf_mnt\n/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt\n\n# update the remote repo's URL to your GitHub URL\ncd ~/wharf_mnt/code_dir\ngit remote set-url origin git@github.com:example/example.git\ngit pull\ngit push\n\n### on bianca ###\n\n# move the directory back from the wharf\nmv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/code_dir/ /proj/$PROJ/\n\n# go into the moved directory\ncd /proj/$PROJ/code_dir\n\n# change the remote repo's URL back to your local repo on Bianca\ngit remote set-url origin /path/to/local/repo\n\n# push any changes you got from GitHub to your local repo\ngit push\n
"},{"location":"software/globus/","title":"globus","text":"

Globus is a service to easily and safely transfer data.

However, Uppsala University does not have a subscription.

Does UU have a subscription now?

Please contribute by letting us know. Thanks!

"},{"location":"software/globus/#links","title":"Links","text":"
  • Globus homepage
"},{"location":"software/gprof/","title":"gprof","text":"

There are multiple profilers available on UPPMAX. This page describes gprof.

gprof is the GNU profiler, provided with the GNU compiler package.

In order to use gprof do the following:

Load a recent gcc module and a recent binutils module:

module load gcc\nmodule load binutils\n

Compile your program with the -pg and -g flags added:

gcc -O0 -pg -g your-program.c -o your-program\n

Run it:

./your-program\n

Then run:

gprof your-program gmon.out > output-file\n
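
To inspect the report directly in the terminal, you can, for example, pipe a brief version of it through a pager (gprof's -b flag suppresses the explanatory text in the report):

gprof -b your-program gmon.out | less\n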
"},{"location":"software/gromacs/","title":"Running Gromacs at UPPMAX","text":"

This page describes how to run the GROMACS molecular dynamics software on UPPMAX systems. See the gromacs web page for more information.

Have a look at this page as well: best practices for running GROMACS on HPC.

Selected setups from benchmarking at HPC2N are used as examples.

"},{"location":"software/gromacs/#loading-the-gromac-module","title":"Loading the gromac module","text":"
module load gromacs/2021.1.th\n
"},{"location":"software/gromacs/#sbatch-script","title":"SBATCH script","text":"

adapted from HPC2N

#!/bin/bash -l\n#SBATCH -A SNIC_project\n#SBATCH -t 00:15:00\n#SBATCH -p node -n 10\n# Use 2 threads per task\n#SBATCH -c 2\n\nmodule load gromacs/2021.1.th\n\n# Automatic selection of single or multi node based GROMACS\nif [ $SLURM_JOB_NUM_NODES -gt 1 ]; then\n  GMX=\"gmx_mpi\"\n  MPIRUN=\"mpirun\"\n  ntmpi=\"\"\nelse\n  GMX=\"gmx\"\n  MPIRUN=\"\"\n  ntmpi=\"-ntmpi $SLURM_NTASKS\"\nfi\n\n# Automatic selection of ntomp argument based on \"-c\" argument to sbatch\nif [ -n \"$SLURM_CPUS_PER_TASK\" ]; then\n  ntomp=\"$SLURM_CPUS_PER_TASK\"\nelse\n  ntomp=\"1\"\nfi\n# Make sure to set OMP_NUM_THREADS equal to the value used for ntomp\n# to avoid complaints from GROMACS\nexport OMP_NUM_THREADS=$ntomp\n$MPIRUN $GMX mdrun $ntmpi -ntomp $ntomp -s MEM.tpr -nsteps 10000 -resethway\n
"},{"location":"software/gromacs/#how-important-is-to-select-appropriate-options","title":"How important is to select appropriate options","text":"

Here is a simple benchmark run on a single interactive node with 20 CPUs, using the MEM example from this benchmark:

module load gromacs/2021.1.th\nmpirun -np XX gmx_mpi mdrun -ntomp YY -s MEM.tpr -nsteps 10000 -resethway\n

where XX * YY = 20

$ grep \"gmx_mpi\\|MPI ranks\\|Performance\" *\n\n#md.log.1#:  gmx_mpi mdrun -ntomp 1 -s MEM.tpr -nsteps 10000 -resethway\n#md.log.1#:On 12 MPI ranks doing PP, and\n#md.log.1#:on 8 MPI ranks doing PME\n#md.log.1#:Performance:       20.520        1.170\n\n#md.log.2#:  gmx_mpi mdrun -ntomp 2 -s MEM.tpr -nsteps 10000 -resethway\n#md.log.2#:On 10 MPI ranks, each using 2 OpenMP threads\n#md.log.2#:Performance:       25.037        0.959\n\n#md.log.3#:  gmx_mpi mdrun -ntomp 4 -s MEM.tpr -nsteps 10000 -resethway\n#md.log.3#:On 5 MPI ranks, each using 4 OpenMP threads\n#md.log.3#:Performance:        5.388        4.454\n\n#md.log.4#:  gmx_mpi mdrun -ntomp 5 -s MEM.tpr -nsteps 10000 -resethway\n#md.log.4#:On 4 MPI ranks, each using 5 OpenMP threads\n#md.log.4#:Performance:       24.090        0.996\n\n#md.log.5#:  gmx_mpi mdrun -ntomp 10 -s MEM.tpr -nsteps 10000 -resethway\n#md.log.5#:NOTE: Your choice of number of MPI ranks and amount of resources results in using 10 OpenMP threads per rank, which is most likely inefficient. The optimum is usually between 1 and 6 threads per rank.\n#md.log.5#:On 2 MPI ranks, each using 10 OpenMP threads\n#md.log.5#:Performance:        3.649        6.577\n\nmd.log:  gmx_mpi mdrun -ntomp 20 -s MEM.tpr -nsteps 10000 -resethway\nmd.log:Performance:        2.012       11.931\n

Notice how badly the last run performs:

$ mpirun -np 1 gmx_mpi mdrun -ntomp 20 -s MEM.tpr -nsteps 10000 -resethway (lines 25-26)

According to this short test, this particular setup runs best on a single Rackham node with:

$ mpirun -np 10 gmx_mpi mdrun -ntomp 2 -s MEM.tpr -nsteps 10000 -resethway (lines 8-10)

"},{"location":"software/gromacs/#running-older-versions-of-gromacs","title":"Running older versions of gromacs","text":""},{"location":"software/gromacs/#versions-451-to-504","title":"Versions 4.5.1 to 5.0.4","text":"

The gromacs tools have been compiled serially. The mdrun program has also been compiled in parallel using MPI. The name of the parallel binary is mdrun_mpi.

Run the parallelized program using:

mpirun -np XXX mdrun_mpi\n

... where XXX is the number of cores to run the program on.

"},{"location":"software/gromacs/#version-511","title":"Version 5.1.1","text":"

The binary is gmx_mpi and (e.g.) the mdrun command is issued like this:

mpirun -np XXX gmx_mpi mdrun\n
"},{"location":"software/icc/","title":"icc","text":"

There are multiple compilers on the UPPMAX HPC clusters. This page describes icc, an Intel C compiler.

The Intel compiler is part of the intel module and can be used to:

  • Compile a C program

Working together with this Intel compiler are:

  • A debugger called gdb
  • An obsolete debugger called idb
  • General profilers called Intel VTune and Intel Advisor
"},{"location":"software/icc_compile_c/","title":"Compile a C program using icc","text":"

icc is the Intel C compiler. This page describes how to compile C code using icc.

"},{"location":"software/icc_compile_c/#procedure","title":"Procedure","text":""},{"location":"software/icc_compile_c/#1-load-an-intel-module","title":"1. Load an intel module","text":"

For versions of the Intel compiler up to and including 2020, load an intel module with a two-digit version, from 15 up to and including 20:

module load intel/20.4\n

The C11 and C17 (bug fix) standards are supported from intel/17 onwards (fully from intel/19).

"},{"location":"software/icc_compile_c/#2","title":"2","text":"

Create and write a C source file called hello_world.c:

nano hello_world.c\n

In nano, write the C program as such:

#include <stdio.h>\n\nint main() {\n  printf(\"hello, world\\n\");\n}\n

After saving and closing nano, compile as such:

icc hello_world.c\n

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/icpc/","title":"icpc","text":"

There are multiple compilers on the UPPMAX HPC clusters. This page describes icpc, an Intel C++ compiler.

The Intel compiler is part of the intel module and can be used to:

  • Compile a C++ program

Working together with this Intel compiler are:

  • A debugger called gdb
  • An obsolete debugger called idb
  • General profilers called Intel VTune and Intel Advisor
"},{"location":"software/icpc_compile_cpp/","title":"Compile a C++ program using icpc","text":"

icpc is an Intel C++ compiler. This page describes how to compile C++ code using icpc.

"},{"location":"software/icpc_compile_cpp/#procedure","title":"Procedure","text":""},{"location":"software/icpc_compile_cpp/#1-load-the-modules","title":"1. Load the modules","text":"

Load a recent intel module:

module load intel/20.4\n
"},{"location":"software/icpc_compile_cpp/#2-write-the-c-program","title":"2. Write the C++ program","text":"

Create and write a C++ source file called hello_world.cpp:

nano hello_world.cpp\n

In nano, write the C++ program as such:

#include <iostream>\n\nint main() \n{\n  std::cout << \"hello, world\\n\";\n}\n
"},{"location":"software/icpc_compile_cpp/#3-compile-the-c-program","title":"3. Compile the C++ program","text":"

After saving and closing nano, compile as such:

icpc hello_world.cpp \n
"},{"location":"software/icpc_compile_cpp/#4-run-the-executable","title":"4. Run the executable","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/icx/","title":"icx","text":"

There are multiple compilers on the UPPMAX HPC clusters. This page describes icx, an Intel C compiler.

The Intel compiler is part of the intel module and can be used to:

  • Compile a C program

Working together with this Intel compiler are:

  • A debugger called gdb
  • An obsolete debugger called idb
  • General profilers called Intel VTune and Intel Advisor
"},{"location":"software/icx_compile_c/","title":"Compile a C program using icx","text":"

icx is an Intel C compiler. This page describes how to compile C code using icx.

"},{"location":"software/icx_compile_c/#procedure","title":"Procedure","text":""},{"location":"software/icx_compile_c/#1-load-the-modules","title":"1. Load the modules","text":"

Load these modules:

module load intel-oneapi \nmodule load compiler/2023.1.0\n
"},{"location":"software/icx_compile_c/#2-write-the-c-program","title":"2. Write the C program","text":"

Create and write a C source file called hello_world.c:

nano hello_world.c\n

In nano, write the C program as such:

#include <stdio.h>\n\nint main() {\n  printf(\"hello, world\\n\");\n}\n
"},{"location":"software/icx_compile_c/#3-compile-the-c-program","title":"3. Compile the C program","text":"

After saving and closing nano, compile as such:

icx hello_world.c\n
"},{"location":"software/icx_compile_c/#4-run-the-executable","title":"4. Run the executable","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/idb/","title":"idb","text":"

There are many debuggers. This page describes idb, the Intel debugger.

idb was provided with the Intel compiler. It is now deprecated and you are advised to use gdb to debug programs compiled with the Intel compiler. See gdb for how to do so.

"},{"location":"software/ides/","title":"IDE:s","text":"

RStudio is an IDE. Here, it is run on Bianca.

"},{"location":"software/ides/#introduction","title":"Introduction","text":"

IDE (pronounced 'aj-dee-ee') is short for 'Integrated Development Environment', or 'a program in which you do programming'. The goal of an IDE is to help you develop code, with features such as code completion, code hints and interactive debugging.

There are many different IDEs, of which some are tailored to one programming language (e.g. RStudio) and some allow multiple programming languages.

How to use an IDE depends on the UPPMAX cluster you want to use:

  • IDEs on Bianca
  • IDEs on Rackham

In general, using an IDE is easiest on Rackham and hardest on Bianca.

"},{"location":"software/ides_on_bianca/","title":"IDEs on Bianca","text":"

RStudio is one of the IDEs that can be used on Bianca.

Here we show how to use some IDEs on Bianca.

Forgot what an IDE is?

See the general page on IDEs.

Do you really want to use an IDE on Bianca?

Using an IDE on Bianca is cumbersome and there are superior ways to develop code on Bianca.

However, using an IDE may make it easier for a new user to feel comfortable using Bianca.

The UPPMAX 'Programming Formalisms' course will teach you a superior workflow, where development takes place on your own regular computer and testing is done using simulated/fake data. When development is done, the tested project is uploaded to Bianca and set up to use the real data instead.

This avoids using a clumsy remote desktop environment and has many added bonuses.

Here are step-by-step guides to start these IDEs on Bianca:

| IDE | Languages | Screenshot |
|---|---|---|
| Jupyter | Python | |
| RStudio | R | |
| VSCode | General-purpose | Impossible |
| VSCodium | General-purpose | |
"},{"location":"software/ides_on_rackham/","title":"IDEs on Rackham","text":"

RStudio is one of the IDEs that can be used on Rackham.

Here we show how to use some IDEs on Rackham.

Forgot what an IDE is?

See the general page on IDEs.

Do you really want to use an IDE on Rackham?

Using an IDE on Rackham is cumbersome and there are superior ways to develop code on Rackham.

However, using an IDE may make it easier for a new user to feel comfortable using Rackham.

The UPPMAX 'Programming Formalisms' course will teach you a superior workflow, where development takes place on your own regular computer and testing is done using simulated/fake data. When development is done, the tested project is uploaded to Rackham and set up to use the real data instead.

This avoids using a clumsy remote desktop environment and has many added bonuses.

Here are step-by-step guides to start these IDEs on Rackham:

| IDE | Languages |
|---|---|
| Jupyter | Python |
| RStudio | R |
| VSCode | General-purpose |
| VSCodium | General-purpose |

IDEs on Rackham. IDEs marked as impossible cannot be run on Rackham.

"},{"location":"software/ifort/","title":"ifort","text":"

ifort is an Intel Fortran compiler.

ifort is part of the intel module and can be used to:

  • Compile a Fortran program
"},{"location":"software/ifort_compile_fortran/","title":"Compile a Fortran program using ifort","text":"

ifort is an Intel Fortran compiler. This page describes how to compile Fortran code using ifort.

"},{"location":"software/ifort_compile_fortran/#procedure","title":"Procedure","text":""},{"location":"software/ifort_compile_fortran/#1-load-the-modules","title":"1. Load the modules","text":"

Load a recent intel module:

module load intel/20.4\n
"},{"location":"software/ifort_compile_fortran/#2-write-the-fortran-program","title":"2. Write the Fortran program","text":"

Create and write a Fortran source file called hello_world.f:

nano hello_world.f\n

In nano, write the Fortran program as such:

C     HELLO.F :  PRINT MESSAGE ON SCREEN\n      PROGRAM HELLO\n      WRITE(*,*) \"hello, world\";\n      END\n
"},{"location":"software/ifort_compile_fortran/#3-compile-the-c-program","title":"3. Compile the C++ program","text":"

After saving and closing nano, compile as such:

ifort hello_world.f\n
"},{"location":"software/ifort_compile_fortran/#4-run-the-executable","title":"4. Run the executable","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/ifx/","title":"ifx","text":"

ifx is an Intel Fortran compiler.

ifx is part of the intel-oneapi and then compiler module.

ifx can be used to:

  • Compile a Fortran program
"},{"location":"software/ifx_compile_fortran/","title":"Compile a Fortran program using ifx","text":"

ifx is an Intel Fortran compiler. This page describes how to compile Fortran code using ifx.

"},{"location":"software/ifx_compile_fortran/#procedure","title":"Procedure","text":""},{"location":"software/ifx_compile_fortran/#1-load-the-modules","title":"1. Load the modules","text":"

Load a recent intel module:

module load intel-oneapi \nmodule load compiler/2023.1.0\n
"},{"location":"software/ifx_compile_fortran/#2-write-the-fortran-program","title":"2. Write the Fortran program","text":"

Create and write a Fortran source file called hello_world.f:

nano hello_world.f\n

In nano, write the Fortran program as such:

C     HELLO.F :  PRINT MESSAGE ON SCREEN\n      PROGRAM HELLO\n      WRITE(*,*) \"hello, world\";\n      END\n
"},{"location":"software/ifx_compile_fortran/#3-compile-the-c-program","title":"3. Compile the C++ program","text":"

After saving and closing nano, compile as such:

ifx hello_world.f\n
"},{"location":"software/ifx_compile_fortran/#4-run-the-executable","title":"4. Run the executable","text":"

Run the program:

./a.out \n

Output:

hello, world\n
"},{"location":"software/igv/","title":"Starting Integrative Genomics Viewer (IGV) on Rackham/Snowy","text":"

This guide will go through step by step how to start Integrative Genomics Viewer.

"},{"location":"software/igv/#step-1-connect-to-uppmax-with-x-forwarding-enabled-important-step","title":"Step 1: Connect to UPPMAX with X-forwarding enabled. (Important step)","text":"

In a terminal, use SSH with X forwarding enabled:

ssh -X [user name]@rackham.uppmax.uu.se\n

For example:

ssh -X sven@rackham.uppmax.uu.se\n
  • Windows users: we recommend the SSH client MobaXterm
  • macOS users: the built-in SSH client ssh needs XQuartz installed too
"},{"location":"software/igv/#step-2-reserve-a-node-using-interactive","title":"Step 2: Reserve a node using \"interactive\"","text":"

Since genomic sequences require lots of memory, it is not suitable to run IGV on one of the login nodes. That would slow down the response times for all other users on the same login node.

Instead, reserve a node that you will have all by yourself. This command will reserve a whole node for 12 hours, the maximum amount of interactive time you can get and still receive a high priority for your job (feel free to change that if you want to).

interactive -A [UPPMAX project id] -p node -t 12:00:00\n

For example:

interactive -A snic2017-7-274 -p node -t 12:00:00\n

For an interactive session on Snowy, add the flag "-M snowy":

interactive -A snic2017-7-274 -M snowy -p node -t 12:00:00\n
"},{"location":"software/igv/#step-3-load-the-igv-module","title":"Step 3: Load the IGV module","text":"

When your job has been started, type the following command to load the IGV module:

module load bioinfo-tools IGV\n
"},{"location":"software/igv/#step-4-start-igv","title":"Step 4: Start IGV","text":"

To start IGV, type the following:

igv-node\n

That's it, now IGV should be loaded and ready to go. For more information about how to use IGV, please visit IGV's user guide.

"},{"location":"software/install/","title":"Software and package installation","text":""},{"location":"software/install/#install-software-yourself","title":"Install software yourself","text":""},{"location":"software/install/#build-from-source","title":"Build from source","text":"
  • To build from source use a compiler module
  • We have several compiler versions from GNU and INTEL
  • check with: $ ml avail gcc and $ ml avail intel
  • Guide for compiling serial programs
  • Guide for compiling parallel programs
    • Available combinations of compilers and parallel libraries
"},{"location":"software/install/#example","title":"Example","text":"

This guide might not work on all programs. Read the installation instructions for your program!

  • Download the program, with wget or by other means like git clone <https-URL to GITHUB repo>.
  • If not cloning, unpack it with tar, gunzip or similar.
tar xvfz program.tgz\n

The steps below are more general again:

  • Read the installation instructions!
  • If the program is written in Fortran, C or C++, load a compiler. Often you will have fewer problems with gcc, but intel gives faster code.
module load gcc\n
  • If applicable, do:
mkdir $HOME/glob/program_name\n./configure --prefix=$HOME/glob/program_name\nmake\nmake test\nmake install\n
  • Try to find a test on the home page of the program or in the installation instructions and try to run it.
"},{"location":"software/install/#packages-and-libraries-to-scripting-programs","title":"Packages and libraries to scripting programs","text":"
  • Python, R and Julia all have some centrally installed packages that are available from the modules.
  • R has a special module called R_packages, and some Machine Learning python packages are included in the python_ML_packages module.
  • If not found there you can try to install those by yourself.

Tip Python packages

  • Try Conda first directly on Bianca and PyPI on Rackham.
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.
  • If you want to keep the number of files down, use PyPI (pip).
  • Also, it is easy to get conflicting environments when using both the Python module and Conda in parallel.
"},{"location":"software/install/#conda","title":"Conda","text":"
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.

Available Conda channels

  • bioconda
  • biocore
  • conda-forge
  • dranew
  • free
  • main
  • pro
  • qiime2
  • r
  • r2018.11
  • scilifelab-lts
  • Conda user guide
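
A minimal sketch of creating and using a Conda environment against the mirrored channels (the module name conda, the activation command and the package samtools are assumptions here; see the Conda user guide for the details on your cluster):

# make the conda command available (module name is an assumption)\nmodule load conda\n\n# create an environment with a package from the mirrored bioconda channel\nconda create -n my_env -c bioconda samtools\n\n# activate the environment and use the package\nsource activate my_env\nsamtools --version\n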
"},{"location":"software/install/#python-packages-with-pip","title":"Python packages with pip","text":"
  • Installing with pip
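
A minimal sketch of installing a package with pip; the --user flag installs into your home directory instead of the system-wide location (the package name is a placeholder):

# make a python with pip available\nmodule load python\n\n# install a package into your home directory ('some_package' is a placeholder)\npip install --user some_package\n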
"},{"location":"software/install/#r-packages","title":"R packages","text":"
  • On UPPMAX the module R_packages is an omnibus package library containing almost all packages in the CRAN and BioConductor repositories.
  • As of 2023-05-31, there were a total of 23100 R packages installed in R_packages/4.2.1.

    • A total of 23109 packages were available in CRAN and BioConductor, and 23000 of these were installed in R_packages/4.2.1
    • The additional 100 R packages available in this module were installed from the CRAN/BioConductor archives, or were hosted on github, gitlab or elsewhere.
  • Installing R packages
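
A minimal sketch of using the omnibus module (ggplot2 is just an example of a CRAN package that should be included there):

module load R_packages/4.2.1\n\n# load a package from the omnibus library ('ggplot2' as an example)\nR -e 'library(ggplot2)'\n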

"},{"location":"software/install/#julia-packages","title":"Julia packages","text":"
  • At UPPMAX there is a central library with installed packages.
  • This is good, especially when working on Bianca, since you then do not need to install via the wharf.
  • A selection of the Julia packages and libraries installed on UPPMAX are:

    CSV\nCUDA\nMPI\nDistributed\nIJulia\nPlots\nPyPlot\nDataFrames\n
  • Installing julia packages
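
A minimal sketch of using a centrally installed Julia package (the module name julia is an assumption; check with ml avail julia):

# make julia available (module name is an assumption)\nmodule load julia\n\n# use one of the centrally installed packages, e.g. CSV\njulia -e 'using CSV; println(\"CSV loaded\")'\n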

"},{"location":"software/install/#containers","title":"Containers","text":"

Info

  • Containers let you install programs without needing to think about the computer environment, like
    • operating system
    • dependencies (libraries and other programs) with correct versions
  • Everything is included
  • Draw-backs
    • you also install things that may already be installed
    • therefore, more disk space is probably needed
"},{"location":"software/install/#singularity","title":"Singularity","text":"

See the UPPMAX Singularity user guide:

  • Create a Singularity container for an R package
"},{"location":"software/install/#docker","title":"Docker","text":"

Docker will unfortunately not work on the clusters, since it requires root permission.

However, it is possible to convert a Docker script to a Singularity container.
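
As a minimal sketch, Singularity can pull a Docker image and convert it to a Singularity image file; note that the pull step must be done on a computer with internet access (the ubuntu image is just an example):

# pull a Docker image and convert it to a .sif file\nsingularity pull docker://ubuntu:22.04\n\n# run a command inside the resulting container\nsingularity exec ubuntu_22.04.sif cat /etc/os-release\n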

"},{"location":"software/install/#spack","title":"Spack","text":"
  • The UPPMAX staff already has other ways to install most software applications.
  • Please use Spack only if other ways to install your tool are not possible or very difficult, e.g. when the tool requires very many dependencies and is not available through e.g. EasyBuild.
  • UPPMAX Spack user guide
"},{"location":"software/install/#own-development","title":"Own development","text":"
  • You may have your own code that you want to run on UPPMAX.
  • Guide for compiling serial programs
  • Guide for compiling parallel programs
    • Available combinations of compilers and parallel libraries
  • User guide for debuggers
  • User guide for profilers
"},{"location":"software/install/#run-own-scripts-or-programs","title":"Run own scripts or programs","text":"

Unless your script or program is in the active path, you run it by its full path, or by ./<file> if you are in the present directory.
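
For example (my_script.sh and the project path are placeholder names):

# make the script executable (needed once)\nchmod +x my_script.sh\n\n# run it from the present directory\n./my_script.sh\n\n# or run it by its full path\n/proj/myproject/my_script.sh\n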

"},{"location":"software/install/#summary","title":"Summary","text":"

Keypoints

  • You have got an overview of the procedures to install packages/libraries and tools on Bianca through the wharf.
  • If you feel uncomfortable installing software yourself, or think that many users would benefit from it, ask support to install it.
"},{"location":"software/intel_advisor/","title":"Intel Advisor","text":"

There are multiple profilers available on UPPMAX. This page describes Intel Advisor.

Intel Advisor is a broad set of tools with a focus on performance analysis of Intel-compiled code.

Intel's performance analysis suite can probably answer any question you have about the performance of your code, including MPI and OpenMP code.

In order to use Advisor, do the following:

module load intel-oneapi advisor\n

Make sure you have a graphical connection through SSH X-forwarding or ThinLinc, then run Advisor graphically like this:

advixe-gui\n
"},{"location":"software/intel_vtune/","title":"Intel VTune","text":"

There are multiple profilers available on UPPMAX. This page describes Intel VTune.

Intel VTune is a broad set of tools with a focus on performance improvement of Intel-compiled code.

Intel's performance analysis suite can probably answer any question you have about the performance of your code, including MPI and OpenMP code.

VTune is focused on choosing optimization techniques that will yield good results, whereas Amplifier is more broadly aimed at performance analysis.

In order to use VTune do the following:

module load intel-oneapi vtune\n

Make sure you have a graphical connection through SSH X-forwarding or ThinLinc, then run VTune graphically like this:

vtune-gui\n
"},{"location":"software/interactive/","title":"interactive","text":"

The job scheduler consists of many programs to manage jobs. interactive is the program for requesting to work interactively on a compute node.

See the UPPMAX guide on how to start an interactive node.

","tags":["interactive"]},{"location":"software/ipython/","title":"IPython","text":"

IPython is a console program that extends the regular Python interpreter: among other things, one can directly run scripts and re-use output.

Want to see a video?

Here are some videos on IPython:

  • YouTube video on python versus IPython
  • YouTube video on IPython

After loading a Python module, you also have the IPython Python command shell available.

Forgot how to load a Python module?

See the UPPMAX page about Python here.

What is a Python command shell?

In computing, a shell is a program around something; for example, Bash is a shell around a UNIX-like operating system.

In computing, a command shell means that the shell is a command-line shell, i.e. text only.

A Python command shell, hence, is a text-only program around Python.

Start the IPython command shell by typing:

ipython\n

or (for explicit Python 3):

ipython3\n

The ipython3 prompt looks like this:

[sven@rackham1 ~]$ ipython\nPython 3.11.4 (main, Aug  7 2023, 16:05:58) [GCC 12.2.0]\nType 'copyright', 'credits' or 'license' for more information\nIPython 8.14.0 -- An enhanced Interactive Python. Type '?' for help.\n\nIn [1]:\n

IPython allows one to write code interactively.

For example, in IPython, type:

print('Hello, world!')\n

and IPython will show the result of that line of code.

IPython can interact with your file system.

How does IPython interact with the file system?

For example, within IPython, running ...

ls\n

... displays a list of files in your current working folder in the same way as Bash's ls.

The Python interpreter will give an error if you do the same.

IPython has an auto-complete triggered by Tab.

How do I get auto-complete?

As an example, writing this line of code in IPython ...

s = 'Hello, world!'\n

... and press enter. Now a variable called s will hold some text.

Now type ...

s.\n

and press Tab. You will see a menu of things you can do with that string. Hold tab to scroll through the many options.

IPython can show graphics.

How do I get IPython to show graphics?

In IPython, run this code line-by-line:

import matplotlib.pyplot as plt\nplt.plot([1, 2, 3, 4])\nplt.show()\n

(or as a one-liner: import matplotlib.pyplot as plt; plt.plot([1, 2, 3, 4]); plt.show())

You will see a window appear:

You will only see a window appear if you have logged in to Rackham using SSH with X forwarding enabled.

Spoiler to login: ssh -X sven@rackham.uppmax.uu.se.

Spoiler to confirm: run xeyes.

IPython can directly run scripts.

How do I get IPython to directly run scripts?

In IPython, run:

run [filename]\n

where [filename] is the name of a file, for example:

run my_script.py\n

IPython will run the script and remember variables, functions and classes created in that script.
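
As a minimal sketch, assume my_script.py contains the single line greeting = 'Hello from a script!'. After running the script, the variable is available in the IPython session:

run my_script.py\nprint(greeting)\n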

"},{"location":"software/javac/","title":"javac","text":"

javac is a Java compiler.

javac is part of the java module and can be used to:

  • Compile a Java program
"},{"location":"software/javac_compile_java/","title":"Compile Java programs using javac","text":"

javac is a Java compiler.

This page describes how to compile Java code using javac.

"},{"location":"software/javac_compile_java/#procedure","title":"Procedure","text":""},{"location":"software/javac_compile_java/#1-load-a-gcc-module","title":"1. Load a GCC module","text":"

Before compiling a Java program, the java module has to be loaded. To load the java module, enter the command:

module load java\n
"},{"location":"software/javac_compile_java/#2-create-a-java-source-file","title":"2. Create a Java source file","text":"

Create and write a Java source file called hello_world.java:

nano hello_world.java\n

In nano, write the Java program as such:

class hello_world\n{\n  public static void main(String[] args)\n  {\n    System.out.println(\"hello, world\");\n  }\n}\n
"},{"location":"software/javac_compile_java/#3-compile-the-source-file","title":"3. Compile the source file","text":"

To compile, enter the command:

javac hello_world.java\n
"},{"location":"software/javac_compile_java/#4-run","title":"4. Run","text":"

To run, enter:

java hello_world\n
"},{"location":"software/jobinfo/","title":"jobinfo","text":"

jobinfo is an UPPMAX tool that shows the status of the Slurm job queue: both running and waiting jobs.
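
A minimal usage sketch (the -u flag, to show only one user's jobs, is an assumption; check jobinfo -h):

# show the whole queue of the current cluster\njobinfo\n\n# show only your own jobs (assuming the -u flag)\njobinfo -u $USER\n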

"},{"location":"software/jobinfo/#what-do-the-fields-priority-and-reason-mean-in-jobinfo-output","title":"What do the fields PRIORITY and REASON mean in \"jobinfo\" output?","text":"For staff

IG: Running jobs FAQ/Your priority...

"},{"location":"software/jobinfo/#initial-priority-at-submit-time","title":"Initial priority, at submit time","text":"

One of the columns in \"jobinfo\" output is named PRIORITY. The queue is sorted on priority, i.e. normally the job with the highest priority starts first, so this is an important parameter.

When you submit a job at UPPMAX, it gets an initial priority. Normally this is 100000, but some jobs start at a priority of 200000 or more:

  • On a limited amount of nodes, a group of people get a higher priority, due to e.g. that they have funded those nodes.
  • Jobs that have asked for the interactive priority, with the flag \"--qos=interact\". This is for one-node jobs with a timelimit of at most 12 hours.
  • Jobs that have asked for the short-job priority, with the flag \"--qos=short\". This is for jobs of from one to four nodes, with a timelimit of at most 15 minutes.

When your project has overdrafted its 30 days running core hour allocation, the jobs within your project get a low initial priority of 70000 or less. These jobs are named bonus jobs. Instead of disallowing them from running, they are allowed to start, if there are free resources, when all higher-priority jobs have started. For each 10000 more core hours, that the project overdrafts its allocation, the priority gets lowered by 10000 more. The bottom value is 10000, i.e. a bonus job can start queuing with any of the following priorities, depending on how big the overdraft is: 70000, 60000, 50000, 40000, 30000, 20000, or 10000.

For every minute waiting in queue, a job gets a priority increase of approximately one, up to a waiting time of 14 days. For example, a normal job that has waited one day has gained roughly 24 * 60 = 1440 priority points, and after 14 days the increase caps out at roughly 20160 points.

Now the waiting time for each kind of job will be described: for high-priority jobs, bonus jobs and normal jobs.

"},{"location":"software/jobinfo/#high-priority-job","title":"High-priority job","text":"

A job that gets a high priority, i.e. a priority higher than 210000, already at submit time will probably start quickly.

The priority value will slowly increase, for each minute passing, until the job starts.

"},{"location":"software/jobinfo/#bonus-job","title":"Bonus job","text":"

A bonus job gets a low priority already at submit time and may have to wait a long time before starting. It is very difficult to estimate the waiting time, because all new high-priority and normal jobs will get a higher priority.

At night or during the next weekend, this job may be lucky and start. After waiting long enough, the monthly allocation of the project will no longer be overdrafted, and the job is automatically converted to a normal job.

The priority value will slowly increase, for each minute passing, until the job starts.

Once the job has started, it will be treated like any other job.

"},{"location":"software/jobinfo/#normal-job","title":"Normal job","text":"

A normal job, starting at priority 100000, increases slowly in priority and may eventually start at a priority a little above 100000.

But more likely, something else will happen to it before that: It will be elevated to a higher starting priority: 190000. At the same time it loses the extra priority it accumulated while waiting at the priority 100000 level.

Only normal jobs will be elevated like this, and only one or a few jobs for each user may be elevated at the same time.

The reason for the elevated level is to give each user a fair chance to start at least one job within a reasonable time, even if other users have thousands of jobs already waiting in the queue. The job start time will then not depend mainly on the number of jobs that are waiting, but instead on the number of other users that are waiting.

At least one job for each user is permitted to wait at the elevated level. Up to 64 jobs for each user are permitted there, if they are very small. Every five minutes the system will try to elevate more jobs, and every five minutes each old, elevated job gets five additional priority points.

Once the job has been elevated, its START_TIME approximations will be much more accurate. The main risk for a later start, is that someone submits new, high-priority jobs. On the other hand, running jobs usually terminate earlier than what their timelimit suggests.

Here is a detailed description on how jobs are picked for elevation:

  • Jobs are picked strictly in order of priority.
  • A job is not elevated, if its timelimit does not allow it to finish before next planned maintenance stop.
  • At least one job per user is elevated, regardless of size and regardless of the two limitations mentioned below in this list.
  • The elevated jobs of a user must not together ask for more than 64 cores.
  • The elevated jobs of a user must not together ask for more than 2688 core hours, i.e. 112 core days.
"},{"location":"software/jobinfo/#how-does-slurm-decide-what-job-to-start-next","title":"How does Slurm decide what job to start next?","text":"

When there are free nodes, an approximate model of Slurm's behaviour is this:

  • Step 1: Can the job in position one start now?
  • Step 2: If it can, remove it from the queue, start it, and continue with step 1.
  • Step 3: If it can not, look at next job.
  • Step 4: Can it start now, without risking that the jobs before it in the queue get a higher START_TIME approximation?
  • Step 5: If it can, remove it from the queue, start it, recalculate what nodes are free, look at next job and continue with step 4.
  • Step 6: If it can not, look at next job, and continue with step 4.

As soon as a new job is submitted and as soon as a job finishes, Slurm restarts with step 1, so most of the time only jobs at the top of the queue are tested for the possibility to start it. As a side effect of this restart behaviour, START_TIME approximations are normally NOT CALCULATED FOR ALL JOBS.

"},{"location":"software/jobinfo/#more-about-other-jobinfo-columns-for-waiting-jobs","title":"More about other jobinfo columns for waiting jobs","text":"

Until now, we have looked into the PRIORITY and USER columns. Let us talk about some of the others, for waiting jobs:

  • JOBID: This is the best way to identify a job in a unique way. If you succeed to submit a job, it gets a jobid. The jobid of your finished jobs can be found with the finishedjobinfo command.
  • POS: This is a numbering of the lines, by jobinfo, after sorting with PRIORITY as first key and JOBID as the second. This is an approximation of the queue position.
  • PARTITION: A Slurm partition is a set of compute nodes, together with some rules about how jobs must be handled, if they ask for this partition. An UPPMAX cluster normally sports the \"devel\", \"core\" and \"node\" partitions.
  • NAME: This is the job name, specified at submission time with the \"-J\" or \"--job-name\" flag. This name can help you to keep track of what the job was meant to do.
  • ACCOUNT: The specified project name, to keep track of how many core hours each project has needed. The projinfo command sums up those core hours.
  • ST: Means status. Status \"PD\" means pending (waiting), status \"R\" means running, status \"CG\" means completing (the job has finished, but the clean-up after the job is not finished yet).
  • START_TIME: An estimate of when the job will start, if all jobs run until the end of their timelimit. You can also make guesses about when nodes get free by looking at the TIME_LEFT column of running jobs. Slurm computes START_TIME only when it needs the information, i.e. you cannot find that information for all jobs.
  • TIME_LEFT: The specified timelimit for the job. When getting near to a maintenance stop, long jobs can not start, because they may not finish before the maintenance stop starts.
  • REASON: There are a number of possible reasons for a job not to have started yet. Some are explained here:
    • AssociationJobLimit: probably means that the job never will start, because it breaks some system limit, set by UPPMAX.
    • BeginTime: says that the user has specified that the job must not start until some specified time in the future.
    • Dependency: means that the job will not start until some special other job(s) has (have) finished.
    • JobHeldAdmin: means that some systems administrator has told that the job must not start.
    • JobHeldUser: means that the job owner has told that job must not start.
    • None: might mean that Slurm has not yet had time to put a reason there.
    • Priority, ReqNodeNotAvail, and Resources: are the normal reasons for waiting jobs, meaning that your job can not start yet, because free nodes for your job are not found.
    • QOSResourceLimit: means that the job has asked for a QOS and that some limit for that QOS has been reached. The job can not start as long as the limit still is reached.
    • QOSJobLimit: probably means that the job can never start, because it breaks some system limit, set by UPPMAX.
  • FEATURES: There are quite a few of these and some are explained here:
    • null: means that no special features have been asked for.
    • fat: means that a fat node (a node with a more-than-standard -- for this cluster -- amount of memory) is needed.
    • thin: means that a standard (i.e. non-fat) node must be used, and this feature is automatically set for most jobs with no memory requirements and a high timelimit, so the job will not unnecessarily hog a fat node for a long time.
"},{"location":"software/jobstats/","title":"jobstats","text":"

An example plot produced by jobstats

jobstats is an UPPMAX tool to enable discovery of resource usage for jobs submitted to the Slurm job queue.

On this page, the following is described:

  • jobstats --plot: How to use jobstats --plot to see resource use in a graphical plot
  • Efficient use: How to use your resources efficiently
  • Examples: Examples of ineffective resource use plots
  • Other jobstats functionality
    • Using jobstats --help
"},{"location":"software/jobstats/#jobstats-plot","title":"jobstats --plot","text":"

With the --plot (or -p) option, a plot is produced showing the resource use per node for a job that completed successfully and took longer than 5 minutes.

There are many ways to use --plot, a minimal use could be:

jobstats --plot [job_id]\n

for example:

jobstats --plot 12345678\n

The plot will be created in the current folder, with the name [cluster_name]-[project_name]-[user_name]-[jobid].png, for example rackham-uppmax1234-sven-876543.png. Use any image viewer, e.g. eog, to see it.

Each plot shows:

  • detailed information in the title.
  • CPU usage in blue
  • current memory usage in solid black
  • overall memory usage in dotted black (if available)
"},{"location":"software/jobstats/#interpreting-a-plot","title":"Interpreting a plot","text":"

For example, in this plot:

  • the title shows the detailed info. milou is the name of a former UPPMAX cluster.
  • CPU usage in blue, which is around 1000%, which is the equivalent of 10 cores being used 100%
  • current memory usage in solid black, which is around 20 GB (left-side vertical axis) or a little bit less than 1 core (right-side vertical axis)
  • overall memory usage in dotted black, which is around 340 GB (left-side vertical axis) or a little bit less than 11 cores (right-side vertical axis)

For jobs running on multiple nodes, plots have multiple columns:

Some plots shows suggestions in red:

| Text in plot | Description |
|---|---|
| nodes_overbooked : nodes booked : nodes used | More nodes were booked than used |
| overbooked : % used | The maximum percentage of booked cores and/or memory that was used (if < 80%) |
| !!half_overbooked | No more than one-half of both cores and memory of a node was used; consider booking half a node instead. |
| !!severely_overbooked | No more than one-quarter of both cores and memory of a node was used, examine your job requirements closely. |
| !!swap_used | Swap storage was used at any point within the job run |
| node_type_overbooked : type booked : type used | A fat node was requested that was larger than was needed. This flag may be produced spuriously if Slurm ran the job on a fat node when a fat node was not requested by the user. |
| cores_overbooked : cores booked : cores used | More cores were booked than used (if < 80%) |
| mem_overbooked : GB booked : GB used | More memory was available than was used (if < 25% and more than one core). |
| core_mem_overbooked : GB in used cores : GB used | Less memory was used than was available in the cores that were used (if < 50%). |

In this example plot, however, the setup is considered good enough.

"},{"location":"software/jobstats/#determine-efficient-use","title":"Determine efficient use","text":"

To determine if you efficiently use UPPMAX resources, follow this chart:

flowchart TD\n  blue_line_close_to_top[CPU usage maximum above 80%?]\n  black_line_close_to_top[Memory usage maximum above 80%?]\n  can_decrease_number_of_cores[Can the number of cores be decreased?]\n  decrease_number_of_cores(Decrease the number of cores)\n  done(Done)\n  blue_line_close_to_top --> |yes| done\n  blue_line_close_to_top --> |no| black_line_close_to_top\n  black_line_close_to_top --> |yes| done\n  black_line_close_to_top --> |no| can_decrease_number_of_cores\n  can_decrease_number_of_cores --> |yes| decrease_number_of_cores\n  can_decrease_number_of_cores --> |no| done

If not, follow the strategy under Efficient use below.

"},{"location":"software/jobstats/#efficient-use","title":"Efficient use","text":"

Here is a strategy to effectively use your UPPMAX resources:

flowchart TD\n  lower_limit_based_on_memory(Pick the number of cores to have enough memory)\n  limited_by_cpu(For that amount of cores, would runtime be limited by CPU?)\n  lower_limit_based_on_cpu(Increase the number of cores, so that on average, the right amount of CPUs is booked)\n\n  done(Use that amount of cores)\n\n  add_one(Increase the number of cores by one for safety)\n\n  lower_limit_based_on_memory --> limited_by_cpu\n  limited_by_cpu --> |no| add_one\n  limited_by_cpu --> |yes| lower_limit_based_on_cpu\n  lower_limit_based_on_cpu --> done\n  add_one --> done
Why not look at CPU usage?

Because CPU is more flexible.

For example, imagine a job with a short CPU spike that could be processed by 16 CPUs. If 1 core of memory is enough, use 1 core of memory: the spike will be turned into a 100% CPU use (of that one core) for a longer duration.

Need a worked-out example?

Pick the number of cores to have enough memory

The dotted black line hits the right-hand vertical axis at 1070%. This means that 11 cores (i.e. 1100%) would be enough for this job.

For that amount of cores, would runtime be limited by CPU?

The answer is 'no'. Having 11 cores would mean that most of the time only 10 are used. Only in the CPU spike at the end, the runtime is limited by CPU. This short time only has a minor impact on the runtime speed.

Increase the number of cores by one for safety

This means booking 12 cores is recommended.

Need another worked-out example?

Pick the number of cores to have enough memory

The dotted black line hits the right-hand vertical axis at 90%. This means that 1 core (i.e. 100%) would be enough for this job.

For that amount of cores, would runtime be limited by CPU?

The answer is 'yes'. Having 1 core would mean that around half the time there is too little CPU power. This has an effect.

Increase the number of cores, so that on average the right amount of CPUs are booked

This is around 8 cores (800%), as with that amount of cores:

  • half of the time, only 1 core is used out of the 7 booked: that is 6 too many
  • half of the time, 13 cores are needed but only 7 are booked: that is 6 too few

This is not an exact algorithm and all numbers from 2 to 9 cores can be considered okay.

Sometimes, however, it is inevitable to use resources inefficiently, see the examples below

No queue is possible

If everyone followed these guidelines, there would probably not even be a queue most of the time!

"},{"location":"software/jobstats/#examples","title":"Examples","text":"

Here are some examples of how inefficient jobs can look and what you can do to make them more efficient.

"},{"location":"software/jobstats/#inefficient-job-example-1-booking-too-much-cores","title":"Inefficient job example 1: booking too much cores","text":"

Here booking 5 cores is considered okay.

Pick the number of cores to have enough memory

The dotted black line hits the right-hand vertical axis at 390%. This means that 4 cores (i.e. 400%) would be enough for this job.

For that amount of cores, would runtime be limited by CPU?

The answer is 'no'. Having 4 cores would mean that most of the time only 1 is used. Only for some CPU spikes, the runtime is limited by CPU. This short time only has a minor impact on the runtime speed.

Increase the number of cores by one for safety

This means booking 5 cores is recommended.

"},{"location":"software/jobstats/#inefficient-job-example-2-booking-too-much-cores","title":"Inefficient job example 2: booking too much cores","text":"

This is one of the grayer areas: booking 2-9 cores is all considered reasonable.

Pick the number of cores to have enough memory

The dotted black line hits the right-hand vertical axis at 90%. This means that 1 core (i.e. 100%) would be enough for this job.

For that amount of cores, would runtime be limited by CPU?

The answer is 'yes'. Having 1 core would mean that around half the time there is too little CPU power. This has an effect.

Increase the number of cores, so that on average the right amount of CPUs are booked

This is around 8 cores (800%), as with that amount of cores:

  • half of the time, only 1 core is used out of the 7 booked: that is 6 too many
  • half of the time, 13 cores are needed but only 7 are booked: that is 6 too few

This is not an exact algorithm and all numbers from 2 to 9 cores can be considered okay.

"},{"location":"software/jobstats/#inefficient-job-example-3","title":"Inefficient job example 3","text":"

Here booking 6 cores is considered okay.

Pick the number of cores to have enough memory

The dotted black line hits the right-hand vertical axis at 40%. This means that 1 core (i.e. 100%) would be enough for this job.

For that amount of cores, would runtime be limited by CPU?

The answer is 'yes'. Having 1 core would mean that most of the time our run is limited by CPU power. This has an impact on the runtime speed.

Increase the number of cores, so that on average the right amount of CPUs are booked

This is around 6 cores (600%), as with that amount of cores:

  • most of the time, 6 out of the 6 booked cores are used: that is 0 too many
  • only rarely, there is a little spike up or a bigger spike down

There are no signs of anything slowing them down, as the line is very even.

This job should either have been booked with 6 cores, or the program being run should have been told to use all 8 cores.

"},{"location":"software/jobstats/#inefficient-job-example-4-slowdown","title":"Inefficient job example 4: slowdown","text":"

This job is using almost all of the cores it has booked, but there seems to be something holding it back. The uneven blue curve tells us that something is slowing down the analysis, and not by a constant amount.

Usually this is how it looks when the filesystem is the cause of a slowdown. Since the load on the filesystem is constantly changing, the speed at which a job can read data from it will also change.

This job should try to copy all the files it will be working with to the node's local hard drive before running the analysis, and by doing so not be affected by the speed of the shared filesystem.

Please see the guide How to use the node's own hard drive for analysis for more information.

You basically just add 2 more commands to your script file and the problem should be solved.

"},{"location":"software/jobstats/#inefficient-job-example-5","title":"Inefficient job example 5","text":"

This job has the same problem as the example above, but in a more extreme way.

It's not uncommon that people book whole nodes out of habit and only run single-threaded programs that use almost no memory. This job is a bit special in that it is being run on a high-memory node, as you can see on the left Y-axis, which goes up to 256 GB RAM. A normal node on Milou only has 128 GB. These high-memory nodes are only bookable if you book the whole node, so you can't book just a few cores on them. That means that if you need 130 GB RAM and the program is only single-threaded, your only option is to book a whole high-memory node. The job will look really inefficient, but it's the only way to do it on our system. The example in the plot does not fall into this category though, as it uses only ~15 GB of RAM, which you could get by booking 2-3 normal cores.

"},{"location":"software/jobstats/#jobstats-help","title":"jobstats --help","text":"

Use jobstats --help to see the help of jobstats:

jobstats --help\n
How does the output look like?
USAGE\n-----\n\n    jobstats  -p [-r] [-M cluster] [ jobid [ jobid ... ] | -A project | - ] [other options]\n\nDiscover jobstats for the specified job(s) on the specified cluster.  Cluster\ndefaults to the value of $SNIC_RESOURCE ('rackham' on the current system) if\nnot specified.\n\nWith the -p/--plot option, a plot is produced from the jobstats for each\njobid.  Plots contain one panel per booked node showing CPU (blue) and memory\nusage (black) traces and include text lines indicating the job number, cluster,\nend time and duration, user, project, job name, and usage flags (more on those\nbelow).  For memory usage, one or two traces are shown: a solid black line\nshows instantaneous memory usage, and a dotted black line shows overall maximum\nmemory usage if this information is available.\n\nPlots are saved to the current directory with the name\n\n    cluster-project-user-jobid.png\n\nNote that not all jobs will produce jobstats files, particularly if the job was\ncancelled or ran for less than 5 minutes.  Also, if a job booked nodes\ninefficiently by not using nodes it asked for, jobstats files will not be\navailable for the booked but unused nodes.\n\nJOBSTATS DISCOVERY\n------------------\n\nThere are five modes for discovery, depending on what the user provides on the\ncommand line: (1) discovery by job number for a completed job; (2) discovery by\njob number for a currently running job; (3) discovery by node and job number,\nfor a completed or running job; (4) discovery by project; or (5) discovery via\ninformation provided on 'stdin'.  In each of the example command lines below, the\n-p/--plot option requests that plots of job resource usage are created.\n\nMode 1:  jobstats -p jobid1 jobid2 jobid3\n-------\nJob numbers valid on the cluster.  [finishedjobinfo](finishedjobinfo.md) is used to determine further\ninformation for each job.  If multiple queries are expected, it might be quicker\nto run [finishedjobinfo](finishedjobinfo.md) yourself separately, see Mode 5 below.  See Mode 2 for a\ncurrently running job.\n\nMode 2:  jobstats -p -r jobid1 jobid2 jobid3\n-------\nJob numbers of jobs currently running on the cluster.  The Slurm squeue tool is\nused to determine further information for each running job.\n\nMode 3:  jobstats -p -n m15,m16 jobid\n-------\n[finishedjobinfo](finishedjobinfo.md) is *not* called and Uppmax's stored job statistics files are\ndiscovered directly.  If you know which node(s) your job ran on or which nodes\nyou are interested in, this will be much faster than Mode 1.\n\nMode 4:  jobstats -p -A project\n-------\nWhen providing a project name that is valid for the cluster, [finishedjobinfo](finishedjobinfo.md) is\nused to determine further information on jobs run within the project.  As for\nMode 1, this can be rather slow.  Furthermore only [finishedjobinfo](finishedjobinfo.md) defaults for\ntime span etc. are used for job discovery.  If multiple queries are expected or\nadditional [finishedjobinfo](finishedjobinfo.md) options are desired, see Mode 5 below.\n\nMode 5:  [finishedjobinfo](finishedjobinfo.md) project | jobstats - -p\n-------\nAccept input on stdin formatted like [finishedjobinfo](finishedjobinfo.md) output.  The long form of\nthis option is '--stdin'.  This mode can be especially useful if multiple\nqueries of the same job information are expected.  In this case, save the\noutput of a single comprehensive [finishedjobinfo](finishedjobinfo.md) query, and extract the parts\nof interest and present them to this script on stdin.  
For example, to produce\nanalyses of all completed jobs in a project during the current calendar year,\nand produce separate tarballs analysing all jobs and providing jobstats plots\nfor each user during this same period:\n\n     [finishedjobinfo](finishedjobinfo.md) -y project > proj-year.txt\n     grep 'jobstat=COMPLETED' proj-year.txt | jobstats - > all-completed-jobs.txt\n     grep 'username=user1' proj-year.txt | jobstats - -p > user1-jobs.txt\n     tar czf user1-jobs.tar.gz user1-jobs.txt *-project-user1-*.png\n     grep 'username=user2' proj-year.txt | jobstats - -p > user2-jobs.txt\n     tar czf user2-jobs.tar.gz user2-jobs.txt *-project-user2-*.png\n     ...\n\nCOMMAND-LINE OPTIONS\n--------------------\n\n    -p | --plot        Produce CPU and memory usage plot for each jobid\n\n    -r | --running     Jobids are for jobs currently running on the cluster. The\n                       Slurm squeue tool is used to discover further information\n                       for the running jobs, and the rightmost extent of the plot\n                       produced will reflect the scheduled end time of the job.\n\n    -A project         Project valid on the cluster.  [finishedjobinfo](finishedjobinfo.md) is used to\n                       discover jobs for the project.  See further comments\n                       under 'Mode 4' above.\n\n    -M cluster         Cluster on which jobs were run [default current cluster]\n\n    -n node[,node...]  Cluster node(s) on which the job was run.  If specified,\n                       then the [finishedjobinfo](finishedjobinfo.md) script is not run and discovery\n                       is restricted to only the specified nodes.  Nodes can be\n                       specified as a comma-separated list of complete node\n                       names, or using the [finishedjobinfo](finishedjobinfo.md) syntax:\n                             m78,m90,m91,m92,m100  or  m[78,90-92,100]\n                       Nonsensical results will occur if the syntaxes are mixed.\n\n    - | --stdin        Accept input on stdin formatted like [finishedjobinfo](finishedjobinfo.md)\n                       output.  The short form of this option is a single dash\n                       '-'.\n\n    -m | --memory      Always include memory usage flags in output.  Default\n                       behaviour is to include memory usage flags only if CPU\n                       usage flags are also present.\n\n    -v | --verbose     Be wordy when describing flag values.\n\n    -b | --big-plot    Produce 'big plot' with double the usual dimensions.\n                       This implies '-p/--plot'.\n\n    -q | --quiet       Do not produce table output\n\n    -Q | --Quick       Run [finishedjobinfo](finishedjobinfo.md) with the -q option, which is slightly\n                       faster but does not include Slurm's record of maximum\n                       memory used. With this option, memory usage analyses can\n                       only rely upon what is reported at 5-minute intervals,\n                       and the trace of maximum memory used (dotted black line)\n                       is not produced.\n\n    --no-extended      Do *not* use extended jobstats information [default is to use it]\n\n    --paging           Include PAGE_IN/PAGE_OUT statistics from extended jobstats [experimental]\n\n    -d                 Produce a header for table output\n\n    --version          Produce version of this script and plot_jobstats, then exit\n\n    -h | -?            
Produce brief help\n\n    --help             Produce detailed help information\n\nThe following command-line options are generally useful only for Uppmax staff.\n\n    --cpu-free FLOAT   Maximum CPU busy percentage for the CPU to count as\n                       free at that sampling time.  Default is 3 %.\n    -x directory       Directory prefix to use for jobstats files.  Default is\n                       '/sw/share/slurm', and directory structure is (depending on whether\n                       --no-extended is used):\n\n                       <prefix>/<cluster>/extended_uppmax_jobstats/<node>/<jobid>\n                       <prefix>/<cluster>/uppmax_jobstats/<node>/<jobid>\n\n    -X directory       Hard directory prefix to use for jobstats files.\n                       Jobstats files are assumed available directly:\n                           '<hard-prefix>/<jobid>'\n    --no-multijobs     Run [finishedjobinfo](finishedjobinfo.md) separately for each jobid, rather\n                       than all jobids bundled into one -j option (for debugging)\n    -f file            [finishedjobinfo](finishedjobinfo.md) script [default is '/sw/uppmax/bin/finishedjobinfo']\n    -P file            plot_jobstats script [default is '/sw/uppmax/bin/plot_jobstats']\n\n\nFURTHER DETAILS\n---------------\n\nThis script produces two types of output.  If the -p/--plot command line option\nis provided, a plot is created of core and memory usage across the life of the\njob.  The name of the file produced has the format:\n\n    cluster-project-user-jobid.png\n\nUnless the -q/--quiet option is provided, a table is also produces containing\nlines with the following tab-separated fields:\n\n  jobid cluster jobstate user project endtime runtime flags booked cores node[,node...] jobstats[,jobstats...]\n\nField contents:\n\n  jobid    : Job ID\n  cluster  : Cluster on which the job was run\n  jobstate : End status of the job: COMPLETED, RUNNING, FAILED, TIMEOUT, CANCELLED\n  user     : Username that submitted the job\n  project  : Project account under which the job was run\n  endtime  : End time of the job (with -n/--node, this is '.')\n  runtime  : Runtime of the job (with -n/--node, this is '.')\n  flags    : Flags indicating various types of resource underutilizations\n  booked   : Number of booked cores (with -n/--node, this is '.')\n  maxmem   : Maximum memory used as reported by Slurm (if unavailable, this is '.')\n  cores    : Number of cores represented in the discovered jobstats files.\n  node     : Node(s) booked for the job, expanded into individual node names,\n             separated by commas; if no nodes were found, this is '.'.\n             The nodes for which jobstats files are available are listed first.\n  jobstats : jobstats files for the nodes, in the same order the nodes are\n             listed, separated by commas; if no jobstats files were discovered,\n             this is '.'\n\nIf -r/--running was used, an additional field is present:\n\n  timelimit_minutes : The time limit of the job in minutes\n\n\nFLAGS\n-----\n\nAn important part of jobstats output are usage flags.  These provide indications\nthat booked resources -- processor cores or memory -- might have been\nunderused.\n\nIn both plot and table output, flags are a comma-separated list of cautions\nregarding core and/or memory underutilisation.  The appearance of a flag does\nnot necessarily mean that resources were used incorrectly.  
It depends upon the\ntools being used and the contents of the Slurm header, and also depends upon\nthe job profile.  Because usage information is gathered every 5 minutes, higher\ntransient usage of cores or memory may not be captured in the log files.\n\nFlags most likely to represent real overbooking of resources are\nnodes_overbooked, overbooked, !!half_overbooked, !!severely_overbooked, and\n!!swap_used.\n\nFor multinode jobs, flags other than nodes_overbooked are determined based only\non the usage of the first node.  Multinode jobs require careful analysis so as\nto not waste resources unnecessarily, and it is a common mistake among\nbeginning Uppmax users to book multiple nodes and run tools that cannot use\nmore than the first.  In this case, nodes_overbooked will appear.\n\nSome flags have a threshold below which they appear.  The default format is\ngenerally 'flag:value-booked:value-used'.\n\n  nodes_overbooked : nodes booked : nodes used\n      More nodes were booked than used\n  overbooked : % used (if < 80%)\n      The maximum percentage of booked cores and/or memory that was used\n  !!half_overbooked\n      No more than 1/2 of both cores and memory of a node was used; consider booking\n      half a node instead.\n  !!severely_overbooked\n      No more than 1/4 of both cores and memory of a node was used, examine your job\n      requirements closely.\n  !!swap_used\n      Swap storage was used at any point within the job run\n  node_type_overbooked : type booked : type used\n      A fat node was requested that was larger than was needed.  This flag may be\n      produced spuriously if Slurm ran the job on a fat node when a fat node was not\n      requested by the user.\n  cores_overbooked : cores booked : cores used\n      More cores were booked than used (if < 80%)\n  mem_overbooked : GB booked : GB used\n      More memory was available than was used (if < 25% and more than one core).\n  core_mem_overbooked : GB in used cores : GB used\n      Less memory was used than was available in the cores that were used (if < 50%).\n\nBy default no flags are indicated for jobs with memory-only cautions except for\nswap usage, because it is common for jobs to heavily use processor cores\nwithout using a sizable fraction of memory.  Use the -m/--memory option to\ninclude flags for memory underutilisation when those would be the only flags\nproduced.\n\nMore verbose flags are output with the -v/--verbose option.\n\n\nScript:   /sw/uppmax/bin/jobstats\nVersion:  2023-11-16\n
"},{"location":"software/jobstats/#modes-of-jobstats-discovery","title":"Modes of jobstats discovery","text":"

There are five modes for discovery, depending on what the user provides on the command line:

  • (1) discovery by job number for a completed job;
  • (2) discovery by job number for a currently running job;
  • (3) discovery by node and job number, for a completed or running job;
  • (4) discovery by project; or
  • (5) discovery via information provided on stdin.

In the example command lines below, the -p/--plot option requests that plots of job resource usage be created.

"},{"location":"software/jobstats/#jobstats-discovery-mode-1-discovery-by-job-number-for-a-completed-job","title":"jobstats discovery mode 1: discovery by job number for a completed job","text":"

Discovery by job number for a completed job:

jobstats --plot jobid1 jobid2 jobid3\n

The job numbers must be valid on the cluster. finishedjobinfo is used to determine further information for each job. This can be rather slow, and a message asking for your patience is printed for each job.

If multiple queries are expected, it would be quicker to run finishedjobinfo yourself separately; see Mode 5 below. See Mode 2 for a currently running job.

"},{"location":"software/jobstats/#jobstats-discovery-mode-2-discovery-by-job-number-for-a-currently-running-job","title":"jobstats discovery mode 2: discovery by job number for a currently running job","text":"

Discovery by job number for a currently running job.

jobstats --plot -r jobid1 jobid2 jobid3\n

Job numbers of jobs currently running on the cluster. The Slurm squeue tool is used to determine further information for each running job.

"},{"location":"software/jobstats/#jobstats-discovery-mode-3-discovery-by-node-and-job-number-for-a-completed-or-running-job","title":"jobstats discovery mode 3: discovery by node and job number, for a completed or running job","text":"

Discovery by node and job number, for a completed or running job.

jobstats --plot -n m15,m16 jobid\n

finishedjobinfo is not called and UPPMAX's stored job statistics files for the cluster of interest are discovered directly. If you know which node(s) your job ran on or which nodes you are interested in, this will be much faster than Mode 1.

"},{"location":"software/jobstats/#jobstats-discovery-mode-4-discovery-by-project","title":"jobstats discovery mode 4: discovery by project","text":"

Discovery by project.

jobstats --plot -A project\n

When providing a project name that is valid for the cluster, finishedjobinfo is used to determine further information on jobs run within the project. As for Mode 1, this can be rather slow, and a message asking for your patience is printed.

Furthermore, only the finishedjobinfo defaults for time span etc. are used for job discovery. If multiple queries are expected or additional finishedjobinfo options are desired, see Mode 5 below.

"},{"location":"software/jobstats/#jobstats-discovery-mode-5-discovery-via-information-provided-on-stdin","title":"jobstats discovery mode 5: discovery via information provided on stdin","text":"

Discovery via information provided on stdin:

What is stdin?

stdin is an abbreviation of 'standard input'; see the Wikipedia page on 'stdin'.

finishedjobinfo -q project | jobstats - --plot\n

Accept input on stdin formatted like finishedjobinfo output. Note the single dash (-) option given to jobstats; the long form of this option is --stdin. This mode can be especially useful if multiple queries of the same job information are expected. In this case, save the output of a single comprehensive finishedjobinfo query, and extract the parts of interest and present them to this script on stdin.

For example, to produce analyses of all completed jobs in a project during the current calendar year, and produce separate tarballs analysing all jobs and providing jobstats plots for each user during this same period:

project=myproj\nfinishedjobinfo -q -y ${project} > ${project}-year.txt\ngrep 'jobstat=COMPLETED' ${project}-year.txt | jobstats - > ${project}-completed-jobs.txt\nfor u in user1 user2 user3 ; do\n    grep \"username=${u}\" ${project}-year.txt | jobstats - --plot > ${u}-jobs.txt\n    tar czf ${u}-jobs.tar.gz ${u}-jobs.txt *-${project}-${u}-*.png\ndone\n
"},{"location":"software/julia/","title":"Julia user guide","text":""},{"location":"software/julia/#julia-installations","title":"Julia installations","text":"

There is no system-installed Julia on the clusters. Therefore, you need to load Julia with the module system. Different versions of Julia are available via the module system on Rackham, Snowy, and Bianca. Some pre-installed packages are also available via the module.

At the time of writing, we have the following modules:

[user@rackham1 ~]$ module avail julia\n------------------------------------------------------\njulia:\n------------------------------------------------------\nVersions:\n        julia/1.0.5_LTS\n        julia/1.1.1\n        julia/1.4.2\n        julia/1.6.1\n        julia/1.6.3\n        julia/1.6.7_LTS\n        julia/1.7.2\n        julia/1.8.5\n        julia/1.9.1\n        julia/1.9.3 (Default)\n
  • \"LTS\" stands for Long term support.

To load a specific version of Julia into your environment, type e.g.

module load julia/1.6.7_LTS\n

Doing:

module load julia\n

will give you the default version (1.9.3), often the latest version.

A good and important suggestion is to always specify a particular version. This makes your work reproducible, which is essential in research!

You can run a Julia script from the shell with:

julia example_script.jl\n

After loading the appropriate Julia module, you will have access to the read-eval-print loop (REPL) command line by typing julia.

julia\n

You will get a prompt like this:

julia>\n

Julia has different modes; the one mentioned above is the so-called Julian mode, in which one can execute commands. How to access the other modes is described in the following paragraphs. Once you are done with your work in any of the other modes, you can return to the Julian mode by pressing the backspace key.

While in the Julian mode, you can enter the shell mode by typing ;:

julia> ;\nshell> pwd\n/current-folder-path\n

This allows you to use Linux commands. Notice that the availability of these commands depends on the OS; for instance, on Windows it will depend on the terminal that you have installed and whether it is visible to the Julia installation.

Another mode available in Julia is the package manager mode; it can be accessed by typing ] in the Julian mode:

julia>]\n(v1.8) pkg>\n

This will make your interaction with the package manager Pkg easier: for instance, instead of typing complete Pkg commands such as Pkg.status() in the Julian mode, you can just type status in the package mode.
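
For example (an illustrative session; CSV is just an example package):

(v1.8) pkg> status\n(v1.8) pkg> add CSV\n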

The last mode is the help mode. You can enter it from the Julian mode by typing ?; then type the name of anything you need more information about:

julia> ?\nhelp?> ans\nsearch: ans transpose transcode contains expanduser instances MathConstants readlines LinearIndices leading_ones leading_zeros\n\nans\n\nA variable referring to the last computed value, automatically set at the interactive prompt.\n

Info

Backspace will get you back to the Julian mode.

Info

Exit with <Ctrl-D> or exit().

See

More detailed information about the modes in Julia can be found here: https://docs.julialang.org/en/v1/stdlib/REPL/

"},{"location":"software/julia/#introduction","title":"Introduction","text":"

Julia is, according to https://julialang.org/:

  • Fast
  • Dynamic
  • Reproducible
  • Composable
  • General
  • Open source

Documentation for version 1.8.

Julia discussions

"},{"location":"software/julia/#packages","title":"Packages","text":"

Some packages are pre-installed. That means that they are also available on Bianca. These include:

  • \"BenchmarkTools\"
  • \"CSV\"
  • \"CUDA\"
  • "DataFrames"
  • \"Distributed\"
  • \"DistributedArrays\"
  • \"Gadfly\"
  • \"IJulia\"
  • \"MPI\"
  • \"Plots\"
  • \"PlotlyJS\"
  • \"PyPlot\"
  • all \"standard\" libraries.

This list will be extended as you, the users, request more packages.

You can inspect the current "central library" by typing, in the Julia shell:

using Pkg\nPkg.activate(DEPOT_PATH[2]*\"/environments/v1.8\");     #change version accordingly\nPkg.status()\nPkg.activate(DEPOT_PATH[1]*\"/environments/v1.8\");     #to return to user library\n

Packages are imported or loaded by the commands import and using, respectively. The difference is shown here. Or briefly:

To use module functions, use import Module to import the module, and Module.fn(x) to use the functions. Alternatively, using Module will import all exported Module functions into the current namespace.
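
A minimal sketch of the difference, using the standard library Statistics:

import Statistics              # calls must be qualified with the module name\nStatistics.mean([1, 2, 3])     # returns 2.0\n\nusing Statistics               # exported names are brought into the namespace\nmean([1, 2, 3])                # also returns 2.0\n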

"},{"location":"software/julia/#use-centrally-installed-packages-the-first-time","title":"Use centrally installed packages the first time","text":"

You may have to build a package the first time you use it; Julia will in that case ask you to do so. Then:

julia> using Pkg\njulia> Pkg.activate(DEPOT_PATH[2]*\"/environments/v1.9\");      #change version accordingly\njulia> Pkg.build(\"<package_name>\")\n
"},{"location":"software/julia/#how-to-install-personal-packages","title":"How to install personal packages","text":"

You may ignore the pre-installed packages. They are there mainly for Bianca users, but may also help you save some disk space! If you ignore them, you can skip the following check.

"},{"location":"software/julia/#check-if-packages-are-installed-centrally","title":"Check if packages are installed centrally","text":"

To make sure that the package is not already installed, type in Julia:

julia> using Pkg\njulia> Pkg.activate(DEPOT_PATH[2]*\"/environments/v1.8\");  #change version accordingly\njulia> Pkg.status()\n

To go back to your own personal packages:

julia> Pkg.activate(DEPOT_PATH[1]*\"/environments/v1.8\");\njulia> Pkg.status()\n

You can load (using/import) ANY package from both the local and the central installation, irrespective of which environment you activate. However, the setup is such that your own package takes priority if the names coincide.

"},{"location":"software/julia/#start-an-installation-locally","title":"Start an installation locally","text":"

To install personal packages, first make sure that you are in your local environment. Within Julia, type:

     Pkg.activate(DEPOT_PATH[1]*\"/environments/v1.8\");\n     Pkg.add(\"<package_name>\")\n

This will install under the path ~/.julia/packages/. Then you can load the package by just doing using/import:

      using <package_name>\n

You can also activate a "package prompt" in Julia with ']':

(@v1.8) pkg> add <package name>\n

To install a specific version, specify it as <package name>@<X.Y.Z>.
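
For example (the package name and version here are only illustrative):

(@v1.8) pkg> add CSV@0.10.4\n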

After adding, you may be asked to precompile or build; do so according to the instructions given on screen. Otherwise, the first time you import or use the package, Julia may start a precompilation that takes from a few seconds up to several minutes.

Exit with <backspace>:

julia>\n
"},{"location":"software/julia/#own-packages-on-bianca","title":"Own packages on Bianca","text":"

You can make an installation on Rackham and then use the wharf to copy it over to your ~/.julia/ directory.

Otherwise, send an email to support@uppmax.uu.se and we'll help you.

"},{"location":"software/julia/#running-ijulia-from-jupyter-notebook","title":"Running IJulia from Jupyter notebook","text":"

As for Python, it is possible to run Julia in a notebook, i.e. in a web interface with the possibility of inline figures and debugging. An easy way to do this is to also load a Python module. In the shell:

module load julia/1.8.5\nmodule load python/3.10.8\njulia\n

In Julia:

using IJulia

notebook(dir=\"</path/to/work/dir/>\")\n

A Firefox session will start with the Jupyter notebook interface.

If not, you may have to build IJulia the first time with Pkg.build("IJulia"). Since IJulia is pre-installed centrally on UPPMAX, you must activate the central environment by following the steps below. This should only be needed the first time:

> using Pkg\n> Pkg.activate(DEPOT_PATH[2]*\"/environments/v1.8\");\n> Pkg.build(\"IJulia\")\n> notebook(dir=\"</path/to/work/dir/>\")\n

This builds the package also locally before starting the notebook. If this is not done, Jupyter will not find the Julia kernel of that version. With notebook(dir="", detached=true) the notebook will not be killed when you exit your REPL Julia session in the terminal.
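
For example (a minimal sketch; the directory is a placeholder):

julia> using IJulia\njulia> notebook(dir=\"</path/to/work/dir/>\", detached=true)\n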

"},{"location":"software/julia/#how-to-run-parallel-jobs","title":"How to run parallel jobs","text":"

There are several packages available for Julia that let you run parallel jobs. Some of them are only able to run on one node, while others try to leverage several machines. You'll find an introduction here.

"},{"location":"software/julia/#run-interactively-on-compute-node","title":"Run interactively on compute node","text":"

Always run parallel jobs on the compute nodes only. This is an example with 4 cores on Rackham:

$ interactive -A <proj> -n 4 -t 3:00:00\nRunning interactively at UPPMAX\n

Slurm user guide

"},{"location":"software/julia/#threading","title":"Threading","text":"

Threading divides up your work among a number of cores within a node. The threads share their memory. Below is an example from within Julia. First, in the shell type:

export JULIA_NUM_THREADS=4\njulia\n

in Julia:

using Base.Threads\nnthreads()\na = zeros(10)\n@threads for i = 1:10\n    a[i] = Threads.threadid()\nend\n
"},{"location":"software/julia/#distributed-computing","title":"Distributed computing","text":"

Distributed processing uses individual processes with individual memory that communicate with each other. In this case, data movement and communication are explicit. Julia supports various forms of distributed computing.

  • A native master-worker system based on remote procedure calls: Distributed.jl
  • MPI through MPI.jl : a Julia wrapper for the MPI protocol, see further down.
  • DistributedArrays.jl: distribute an array among workers

If choosing between distributed and MPI, distributed is easier to program, whereas MPI may be more suitable for multi-node applications.

For more detailed information, please consult the manual for distributed computing and Julia MPI.

"},{"location":"software/julia/#master-worker-model","title":"Master-Worker model","text":"

We need to launch Julia with

julia -p 4\n

then inside Julia you can check

nprocs()\nworkers()\n

which should print 5 and [2,3,4,5]. Why 5, you ask? Because \"worker 1\" is the \"boss\". And bosses don't work.

As you can see, you can run distributed computing directly from the Julia shell.
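
As a minimal sketch (assuming Julia was started with -p 4 as above), pmap spreads function calls over the workers:

using Distributed\n# each call runs on one of the workers; myid() reports which one\npmap(x -> (myid(), x^2), 1:8)\n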

"},{"location":"software/julia/#batch-example","title":"Batch example","text":"

Julia script hello_world_distributed.jl:

using Distributed\n# launch worker processes\nnum_cores = parse(Int, ENV[\"SLURM_CPUS_PER_TASK\"])\naddprocs(19)\nprintln(\"Number of cores: \", nprocs())\nprintln(\"Number of workers: \", nworkers())\n# each worker gets its id, process id and hostname\nfor i in workers()\n    id, pid, host = fetch(@spawnat i (myid(), getpid(), gethostname()))\n    println(id, \" \" , pid, \" \", host)\nend\n# remove the workers\nfor i in workers()\n    rmprocs(i)\nend\n
  • Batch script job_distributed.slurm:
#!/bin/bash\n#SBATCH -A <proj>\n#SBATCH -p devel\n#SBATCH --job-name=distrib_jl     # create a short name for your job\n#SBATCH --nodes=1                # node count\n#SBATCH --ntasks=20              # total number of tasks across all nodes\n#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)\n#SBATCH --time=00:01:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n#SBATCH --mail-user=<email>\nmodule load julia/1.8.5\njulia hello_world_distributed.jl\n

Put the job in the queue:

sbatch job_distributed.slurm\n
"},{"location":"software/julia/#interactive-example","title":"Interactive example","text":"
salloc -A <proj> -p node -N 1 -n 10 -t 1:0:0\njulia hello_world_distributed.jl\n
"},{"location":"software/julia/#mpi","title":"MPI","text":"

The Threaded and Distributed packages are included in the Base installation. However, in order to use MPI with Julia you will need to follow the next steps (only the first time):

  • Load a toolchain that contains an MPI library

For julia/1.6.3 and earlier:

module load gcc/9.3.0 openmpi/3.1.5\n

For julia/1.6.7_LTS & 1.7.2:

module load gcc/10.3.0 openmpi/3.1.6\n

For julia/1.8.5:

module load gcc/11.3.0 openmpi/4.1.3\n
  • Load Julia
ml julia/1.8.5   # or other\n
  • Start Julia on the command line
julia\n
  • Change to package mode and add the MPI package
(v1.8) pkg> add MPI\n
  • In the julian mode run these commands:
julia> using MPI\njulia> MPI.install_mpiexecjl()\n[ Info: Installing `mpiexecjl` to `~/.julia/bin`...\n[ Info: Done!\n
  • Add the installed mpiexecjl wrapper to your path on the Linux command line
export PATH=~/.julia/bin:$PATH\n
  • Now the wrapper should be available on the command line
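
A quick sanity check:

which mpiexecjl   # should print something like /home/<user>/.julia/bin/mpiexecjl\n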

Because of how MPI works, we need to explicitly write our code into a file, juliaMPI.jl:

import MPI\nusing Printf   # needed for @printf\nMPI.Init()\ncomm = MPI.COMM_WORLD\nMPI.Barrier(comm)\nroot = 0\nr = MPI.Comm_rank(comm)\nsr = MPI.Reduce(r, MPI.SUM, root, comm)\nif MPI.Comm_rank(comm) == root\n    @printf(\"sum of ranks: %s\\n\", sr)\nend\nMPI.Finalize()\n

You can execute your code as in an interactive session with several cores (at least 3 in this case):

module load gcc/11.3.0 openmpi/4.1.3\nmpiexecjl -np 3 julia juliaMPI.jl\n

A batch script, job_MPI.slurm, should include a "module load gcc/XXX openmpi/XXX" line:

#!/bin/bash\n#SBATCH -A <proj>\n#SBATCH -p devel\n#SBATCH --job-name=MPI_jl        # create a short name for your job\n#SBATCH --nodes=1                # node count\n#SBATCH --ntasks=20              # total number of tasks across all nodes\n#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)\n#SBATCH --time=00:05:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n#SBATCH --mail-user=<email>\nmodule load julia/1.8.5\nmodule load gcc/11.3.0 openmpi/4.1.3\nexport PATH=~/.julia/bin:$PATH\nmpiexecjl -n 20 julia juliaMPI.jl\n
  • Run with
sbatch job_MPI.slurm\n

See the MPI.jl examples for more input!

"},{"location":"software/julia/#gpu","title":"GPU","text":"

Example Julia script, juliaCUDA.jl:

using CUDA, Test\nN = 2^20\nx_d = CUDA.fill(1.0f0, N)\ny_d = CUDA.fill(2.0f0, N)\ny_d .+= x_d\n@test all(Array(y_d) .== 3.0f0)\nprintln(\"Success\")\n

Batch script juliaGPU.slurm, note settings for Bianca vs. Snowy:

#!/bin/bash\n#SBATCH -A <proj-id>\n#SBATCH -M <snowy OR bianca>\n#SBATCH -p node\n#SBATCH -C gpu   #NB: Only for Bianca\n#SBATCH -N 1\n#SBATCH --job-name=juliaGPU         # create a short name for your job\n#SBATCH --gpus-per-node=<1 OR 2>             # number of gpus per node (Bianca 2, Snowy 1)\n#SBATCH --time=00:15:00          # total run time limit (HH:MM:SS)\n#SBATCH --qos=short              # if test run t<15 min\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n#SBATCH --mail-user=<email>\nmodule purge\nmodule load julia/1.8.5          # system CUDA works as of today\njulia juliaCUDA.jl\n
  • Put job in queue:
sbatch juliaGPU.slurm\n
"},{"location":"software/julia/#interactive-session-with-gpu","title":"Interactive session with GPU","text":"

On Snowy, getting 1 cpu and 1 gpu:

interactive -A <proj> -n 1 -M snowy --gres=gpu:1  -t 3:00:00\n

On Bianca, getting 2 cpu:s and 1 gpu:

interactive -A <proj> -n 2 -C gpu --gres=gpu:1 -t 01:10:00\n
  • Wait until the session has started, then load one of these Julia modules:
julia/1.7.2\njulia/1.8.5 (Default)\n
"},{"location":"software/jupyter/","title":"Jupyter","text":"

There are multiple IDEs on the UPPMAX clusters, Jupyter among others. Here we describe how to run Jupyter.

Jupyter is an IDE specialized for the Python programming language.

Info

  • You can run Python in a Jupyter-notebook, i.e. in a web interface with possibility of inline figures and debugging.
  • Jupyter-lab is installed in the python>=3.10.8 module

Warning

Always start Jupyter in a ThinLinc session and preferably in an interactive session.

","tags":["Jupyter","Python","IDE"]},{"location":"software/jupyter/#introduction","title":"Introduction","text":"

Jupyter is a web application that allows literate programming for Python. That is, Jupyter allows you to create documents where Python code is shown and run, with its results shown, surrounded by written text (e.g. English).

Additionally, Jupyter allows you to share files and hence includes a file manager.

Jupyter is:

  • started and running on a server, for example, an interactive node
  • displayed in a web browser, such as firefox.

Jupyter can be slow when using the remote desktop webpage (e.g. https://rackham-gui.uppmax.uu.se).

  • For UPPMAX, one can use a locally installed ThinLinc client to speed up Jupyter. See the UPPMAX documentation on ThinLinc (https://www.uppmax.uu.se/support/user-guides/thinlinc-graphical-connection-guide) on how to install the ThinLinc client locally.

  • It is also possible to run Jupyter with a local browser to speed up the graphics but still use the benefits of many CPUs and much RAM.

    • Run Jupyter in your local browser
","tags":["Jupyter","Python","IDE"]},{"location":"software/jupyter/#how-to-start-jupyter","title":"How to start Jupyter","text":"
  • Run Jupyter on Bianca
  • Run Jupyter on Rackham
  • Run Jupyter in your local browser
  • Run Jupyter in a virtual environment (see below)
","tags":["Jupyter","Python","IDE"]},{"location":"software/jupyter/#run-jupyter-in-a-virtual-environment-venv","title":"Run Jupyter in a virtual environment (venv)","text":"

You could also use jupyter- (lab or notebook) in a venv virtual environment.

If you decide to use the --system-site-packages configuration, you will get Jupyter from the Python module you created your virtual environment with. However, you won't find your locally installed packages from that Jupyter session. To solve this, force-reinstall Jupyter within the virtual environment (option -I):

pip install -I jupyter\n

and run it as above.
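
As a minimal end-to-end sketch (the Python module version and the venv path are just examples):

module load python/3.11.8\npython -m venv --system-site-packages ~/venvs/projectA\nsource ~/venvs/projectA/bin/activate\npip install -I jupyter\njupyter-notebook --ip 0.0.0.0 --no-browser\n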

Be sure to start the kernel with the virtual environment name, like \"project A\", and not \"Python 3 (ipykernel)\".

","tags":["Jupyter","Python","IDE"]},{"location":"software/jupyter/#links","title":"Links","text":"
  • The Jupyter project contains a lot of information and inspiration
  • The Jupyter Notebook documentation
","tags":["Jupyter","Python","IDE"]},{"location":"software/jupyter_local/","title":"Jupyter in local browser","text":"

To increase the speed of the graphics, it is possible to run Jupyter on a compute node but use the graphics on your local computer. That will speed up the interaction when plotting figures and managing the GUI.

This is possible for the [Rackham](../cluster_guides/rackham.md) and Snowy clusters.

Warning

This feature is not possible for Bianca

","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#step-1-login-to-an-uppmax-cluster","title":"Step 1: Login to an UPPMAX cluster","text":"
  • Whether you use ThinLinc or a terminal does not matter.
","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#step-2-start-an-interactive-session","title":"Step 2: start an interactive session","text":"

Start a terminal. Within that terminal, start an interactive session from the login node (change to the correct NAISS project ID).

","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#for-rackham","title":"For Rackham","text":"
interactive -A <naiss-project-id>  -t 4:00:00\n
","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#for-snowy","title":"For Snowy","text":"
interactive -M snowy -A <naiss-project-id>  -t 4:00:00\n
","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#step-3-start-jupyter-in-the-interactive-session","title":"Step 3: start Jupyter in the interactive session","text":"

Within your terminal with the interactive session, load a modern Python module:

module load python/3.11.8\n

Then, start jupyter-notebook (or jupyter-lab):

jupyter-notebook --ip 0.0.0.0 --no-browser\n

Leave this terminal open.

The terminal will display multiple URLs.

Copy one of these, like:

http://r486:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2\n
","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_local/#step-4-on-own-computer","title":"Step 4: On own computer","text":"
  • If you use ssh to connect to Rackham, you need to forward the port of the interactive node to your local computer.
    • On Linux or Mac this is done by running the following in another terminal. Make sure you change the ports if they are not at the default 8888.
ssh -L 8888:r486:8888 username@rackham.uppmax.uu.se\n
  • Replace r486 if you got another node
  • If you use Windows it may be better to do this in the PowerShell instead of a WSL2 terminal.
  • If you use PuTTY - you need to change the settings in \"Tunnels\" accordingly (could be done for the current connection as well).

SSH port forwarding

On your computer, open the address you got but replace r486 with localhost or 127.0.0.1, i.e.

http://localhost:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2\n

or

http://127.0.0.1:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2\n

This should bring up the Jupyter interface on your computer, while all calculations and files will be on the Rackham compute node.

Back to jupyter page

","tags":["Jupyter","local","Rackham","Snowy"]},{"location":"software/jupyter_on_bianca/","title":"Jupyter on Bianca","text":"

There are multiple IDEs on the UPPMAX clusters, Jupyter among others. Here we describe how to run Jupyter on Bianca.

Jupyter is an IDE specialized for the Python programming language.

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#procedure","title":"Procedure","text":"Prefer a video?

This procedure is also demonstrated in this YouTube video.

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#1-get-within-sunet","title":"1. Get within SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#2-start-the-bianca-remote-desktop-environment","title":"2. Start the Bianca remote desktop environment","text":"Forgot how to start Bianca's remote desktop environment?

See the 'Logging in to Bianca' page.

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#3-start-an-interactive-session","title":"3. Start an interactive session","text":"

Within the Bianca remote desktop environment, start a terminal. Within that terminal, start an interactive node:

interactive -A [project_number] -t 8:00:00\n

Where [project_number] is your UPPMAX project, for example:

interactive -A sens2016001 -t 8:00:00\n
What is my UPPMAX project number?

An easy answer that is probably true:

The one you used to login, which is part of your prompt. For example, in the prompt below, the project is sens2016001.

[sven@sens2016001-bianca sven]$\n
","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#4-load-a-python-module","title":"4. Load a Python module","text":"

Within the terminal of the interactive session, load a Python module

module load python/3.11.4\n
Forgot what the module system is?

See the UPPMAX pages on the module system here.

Can I use other Python modules?

Yes, you can use any module later than (and including) the python/3.10.8 module.

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#5-start-the-jupyter-notebook","title":"5. Start the Jupyter notebook","text":"

Still within the terminal of the interactive session, start a notebook like this:

jupyter-notebook --ip 0.0.0.0 --no-browser\n

or jupyter lab:

jupyter-lab --ip 0.0.0.0 --no-browser\n

Jupyter will show some IP address in the terminal, which you will need in the next step.

","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_bianca/#6-browser-to-the-jupyter-notebook","title":"6. Browser to the Jupyter notebook","text":"

In the remote desktop environment on Bianca, start Firefox. Point Firefox to one of the URLs from the Jupyter output.

Can I start Firefox from the terminal too?

Yes, in another terminal, one can use:

firefox [URL]\n

where [URL] is a URL produced by Jupyter, for example:

firefox http://127.0.0.1:8889/tree?token=7c305e62f7dacf65d74a4b966e2851987479ad0a258de34f\n
","tags":["Jupyter","Bianca"]},{"location":"software/jupyter_on_rackham/","title":"Jupyter on Rackham","text":"

There are multiple IDEs on the UPPMAX clusters, Jupyter among others. Here we describe how to run Jupyter on Rackham.

Jupyter is an IDE specialized for the Python programming language.

","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#procedure","title":"Procedure","text":"Prefer a video?

This procedure is also demonstrated in this YouTube video

","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#1-start-a-rackham-remote-desktop-environment","title":"1. Start a Rackham remote desktop environment","text":"

This can be either:

  • Login to the Rackham remote desktop environment using the website
  • Login to the Rackham remote desktop environment using a local ThinLinc client
","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#2-start-an-interactive-session","title":"2. Start an interactive session","text":"

Within the Rackham remote desktop environment, start a terminal. Within that terminal, start an interactive node:

interactive -A [project_number] -t 8:00:00\n

Where [project_number] is your UPPMAX project, for example:

interactive -A sens2016001 -t 8:00:00\n
What is my UPPMAX project number?

See the UPPMAX documentation on how to see your UPPMAX projects

","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#3-load-a-python-module","title":"3. Load a Python module","text":"

Within the terminal of the interactive session, load a Python module

module load python/3.11.4\n
Forgot what the module system is?

See the UPPMAX pages on the module system here.

Can I use other Python modules?

Yes, you can use any module later than (and including) the python/3.10.8 module.

","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#4-start-the-jupyter-notebook","title":"4. Start the Jupyter notebook","text":"

Still within the terminal of the interactive session, start a notebook like this:

jupyter-notebook --ip 0.0.0.0 --no-browser\n

or jupyter lab:

jupyter-lab --ip 0.0.0.0 --no-browser\n

Jupyter will show some IP address in the terminal, which you will need in the next step.

","tags":["Jupyter","Rackham"]},{"location":"software/jupyter_on_rackham/#5-browser-to-the-jupyter-notebook","title":"5. Browser to the Jupyter notebook","text":"

In the remote desktop environment on Rackham, start Firefox. Point Firefox to one of the URLs from the Jupyter output.

Can I start Firefox from the terminal too?

Yes, in another terminal, one can use:

firefox [URL]\n

where [URL] is a URL produced by Jupyter, for example:

firefox http://127.0.0.1:8889/tree?token=7c305e62f7dacf65d74a4b966e2851987479ad0a258de34f\n
","tags":["Jupyter","Rackham"]},{"location":"software/jvarkit/","title":"jvarkit","text":"

According to the jvarkit GitHub repository, jvarkit is 'Java utilities for Bioinformatics'.

jvarkit is unavailable in the UPPMAX module system.

"},{"location":"software/jvarkit/#create-a-jvarkit-singularity-container","title":"Create a jvarkit Singularity container","text":"

To create a Singularity container one can follow the procedure documented at 'Create a Singularity container from Docker_Hub'.

Spoiler:

sudo singularity build my_container.sif docker://lindenb/jvarkit:1b2aedf24\n

Note that 1b2aedf24 is the tag of the latest version of this Docker script. In the future, there may be newer tags.

Usage:

./jvarkit.sif java -jar /opt/jvarkit/dist/jvarkit.jar --help\n
"},{"location":"software/jvarkit/#links","title":"Links","text":"
  • the jvarkit GitHub repository
"},{"location":"software/matlab/","title":"MATLAB user guide","text":""},{"location":"software/matlab/#the-matlab-module","title":"The MATLAB module","text":"

MATLAB can be started only if you load the matlab module first. Most of the available official toolboxes are also installed. At the time of this writing, our most recent installation is matlab/R2023b.

Doing:

module load matlab\n

will give you the latest version.

If you need a different version, check the availability by:

module avail matlab\n

To get started with MATLAB do (for instance):

module load matlab/R2023a\nmatlab &\n

That will start a MATLAB session with the common GUI. Use & to have MATLAB run in the background, keeping the terminal active for other work.

A good and important suggestion is to always specify a particular version. This makes your work reproducible, which is essential in research!

"},{"location":"software/matlab/#first-time-since-may-13-2024","title":"First time, since May 13 2024","text":"
  • If you use MATLAB after May 13 2024, of any version, you have to do the following step to be able to use the full features of running parallel jobs.

    • only needs to be called once per version of MATLAB.
    • Note, however, that on Bianca this has to be done separately.
  • After logging into the cluster, configure MATLAB to run parallel jobs on the cluster by calling the shell script configCluster.sh.

module load matlab/<version>\nconfigCluster.sh <project-ID>    # Note: no '-A'\n
  • This will run a short configuration job in an interactive session.
  • Jobs will now default to the cluster rather than submit to the local machine.
  • It should look like this (example for Bianca)

  • The session should exit automatically but if not you can end the session by
    • exit
    • or <CTRL-C>
  • When done, start Matlab as you usually do with matlab &.

Warning

  • Do these steps for each matlab version you will use.
  • On Bianca you need to do this for each sens project that will use MATLAB, as well.

Tip

  • Check the MATLAB versions for which you have set the Slurm configuration by
ls -l .matlab/*/parallel.mlsettings\n
  • Look for dates from May 2024 and onwards.
"},{"location":"software/matlab/#introduction","title":"Introduction","text":"

Using MATLAB on the cluster enables you to utilize high performance facilities like:

  • Parallel computing
    • Parallel for-loops
    • Evaluate functions in the background
  • Big data processing
    • Analyze big data sets in parallel
  • Batch Processing
    • Offload execution of functions to run in the background
  • GPU computing (Available on Bianca and Snowy)
    • Accelerate your code by running it on a GPU
  • Machine & Deep learning
    • Statistics and Machine Learning
    • Deep Learning

See MathWorks's complete user guide

Some online tutorials and courses:

  • Parallel computing
  • Machine Learning
    • Machine learning article
    • Machine learning tutorial
  • Deep Learning
    • Deep learning article
    • Deep learning tutorial
"},{"location":"software/matlab/#running-matlab","title":"Running MATLAB","text":"

Warning

  • It is possible to start MATLAB on the login node.
  • This can be a way to work if you

    • work with just light analysis
    • just use Matlab to start batch jobs from the graphical user interface.
  • Then you should start matlab with just ONE thread

matlab -singleCompThread &\n
"},{"location":"software/matlab/#graphical-user-interface","title":"Graphical user interface","text":"

To start MATLAB with its usual graphical interface (GUI), start it with:

matlab\n

If you will use significant resources, like processor time or RAM, you should start an interactive session on a calculation node. Use at least 2 cores (-n 2) when running interactively, otherwise MATLAB may not start. You can use more cores if you will do some parallel calculations (see the parallel section below). Example:

interactive -A <proj> -p core -n 2 -t 1:0:0\n

This example starts a session with 2 cores for a wall time of 1 hour.

"},{"location":"software/matlab/#matlab-in-terminal","title":"MATLAB in terminal","text":"

For simple calculations it is possible to start just a command shell in your terminal:

matlab -nodisplay\n

Exit with 'exit'.

Run script from terminal or bash script

In order to run a script directly from terminal:

matlab -batch \"run('<path/to/script.m>')\" | tail -n +2\n

List all ways to run/start MATLAB:

matlab -h\n
"},{"location":"software/matlab/#thinlinc","title":"ThinLinc","text":"

You may get the best MATLAB graphics performance by running it in the ThinLinc environment.

  • For rackham (in ThinLinc app): rackham-gui.uppmax.uu.se

  • For Bianca (from web-browser): https://bianca.uppmax.uu.se

You may want to consult our UPPMAX ThinLinc user guide.

"},{"location":"software/matlab/#how-to-run-parallel-jobs","title":"How to run parallel jobs","text":""},{"location":"software/matlab/#how-to-run-parallel-jobs-for-the-first-time-since-may-13-2024","title":"How to run parallel jobs for the first time, since May 13 2024","text":"
  • If you use MATLAB after May 13 2024, of any version, you have to do the following step to be able to use the full features of running parallel jobs.
    • only needs to be called once per version of MATLAB.
    • Note, however, that on Bianca this has to be done separately.
  • After logging into the cluster, configure MATLAB to run parallel jobs on the cluster by calling the shell script configCluster.sh.
module load matlab\nconfigCluster.sh <project-ID>    # Note: no '-A'\n
  • This will run a short configuration job in an interactive session, closing itself when done.
  • Jobs will now default to the cluster rather than submit to the local machine.
"},{"location":"software/matlab/#two-matlab-commands","title":"Two MATLAB commands","text":"

Two commands in MATLAB are important to make your code parallel:

  • parfor will distribute your \"for loop\" among several workers (cores)
  • parfeval runs a section or a function on workers in the background
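
A minimal sketch of parfeval (illustrative; magic(3) stands in for real work):

p = parpool(4);                  % open a pool with 4 workers\nf = parfeval(p, @magic, 1, 3);   % evaluate magic(3) in the background; 1 output argument\nM = fetchOutputs(f);             % blocks until the result is ready\ndelete(p);\n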
"},{"location":"software/matlab/#use-interactive-matlab","title":"Use interactive matlab","text":"

First, start an interactive session on a calculation node with, for instance 8 cores by:

interactive -A <project> -p core -n 8 -t 3:00:00\n

In MATLAB open a parallel pool of 8 local workers:

>> p = parpool(8)\n

What happens if you try to run the above command twice? You can't run multiple parallel pools at the same time. Query the number of workers in the parallel pool:

>> p.NumWorkers\n

gcp will \"get current pool\" and return a handle to it. If a pool has not already been started, it will create a new one first and then return the handle to it:

>> p = gcp\n

Shutdown the parallel pool:

>> delete(p)\n

The following checks whether a pool is open and, if so, deletes it:

>> delete(gcp('nocreate'))\n

This will delete a pool if it exists, but won't create one first if it doesn't already exist.

With parpool('local') or parcluster('local') you will use the settings for 'local'. With parpool('local',20) you will get 20 cores, but otherwise the 'local' settings, like automatic shutdown after 30 minutes. You can change your settings here: HOME > ENVIRONMENT > Parallel > Parallel preferences.

"},{"location":"software/matlab/#matlab-batch","title":"MATLAB Batch","text":"

With MATLAB you can e.g. submit jobs directly to our job queue scheduler, without having to use Slurm's commands directly. Let us first make two small functions. The first one, a little simpler, is saved in the file parallel_example.m:

    function t = parallel_example(nLoopIters, sleepTime)\n      t0 = tic;\n      parfor idx = 1:nLoopIters\n        A(idx) = idx;\n        pause(sleepTime);\n      end\n      t = toc(t0);\n

and the second, a little longer, is saved in parallel_example_hvy.m:

    function t = parallel_example_hvy(nLoopIters, sleepTime)\n      t0 = tic;\n      ml = 'module list';\n      [status, cmdout] = system(ml);\n      parfor idx = 1:nLoopIters\n        A(idx) = idx;\n        for foo = 1:nLoopIters*sleepTime\n          A(idx) = A(idx) + A(idx);\n          A(idx) = A(idx)/3;\n        end\n      end\n      t = toc(t0);\n

Begin by running the command

>> configCluster %(on Bianca it will look a little different)\n

in the MATLAB Command Window to choose a cluster configuration. MATLAB will set up a configuration and then print out some instructions, shown below. You can also set defaults that are used if you don't specify them explicitly; go to HOME > ENVIRONMENT > Parallel > Parallel preferences.

       [1] rackham\n       [2] snowy\n    Select a cluster [1-2]: 1\n    >>\n    >> c = parcluster('rackham'); %on Bianca 'bianca Rxxxxx'\n    >> c.AdditionalProperties.AccountName = 'snic2021-X-YYY';\n    >> c.AdditionalProperties.QueueName = 'node';\n    >> c.AdditionalProperties.WallTime = '00:10:00';\n    >> c.saveProfile\n    >> job = c.batch(@parallel_example, 1, {90, 5}, 'pool', 19) %19 is for 20 cores. On Snowy and Bianca use 15.\n    >> job.wait\n    >> job.fetchOutputs{:}\n

Follow them; they inform you of what is needed in your script or on the command line to run in parallel on the cluster. The line c.batch(@parallel_example, 1, {90, 5}, 'pool', 19) can be understood as: put the function parallel_example in the batch queue. The arguments to batch are:

    c.batch(function name, number of output arguments, {the inputs to the function}, 'pool', no of additional workers to the master)\n\n    c.batch(@parallel_example, 1 (t=toc(t0)), {nLoopIters=90, sleepTime=5}, 'pool', 19)\n

To see the output to screen from jobs, use job.Tasks.Diary. Output from the submitted function is fetched with 'fetchOutputs()'.
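
For example (illustrative, reusing the job handle from above):

>> job.Tasks.Diary           % text printed to screen by the workers\n>> out = job.fetchOutputs{:} % return value(s) of the submitted function\n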

For jobs using several nodes (in this case 2) you may modify the call to:

    >> configCluster\n       [1] rackham\n       [2] snowy\n    Select a cluster [1-2]: 1\n    >>\n    >> c = parcluster('rackham'); %on Bianca 'bianca R<version>'\n    >> c.AdditionalProperties.AccountName = 'snic2021-X-YYY';\n    >> c.AdditionalProperties.QueueName = 'node';\n    >> c.AdditionalProperties.WallTime = '00:10:00';\n    >> c.saveProfile\n    >> job = c.batch(@parallel_example_hvy, 1, {1000, 1000000}, 'pool', 39)% 31 on Bianca or Snowy\n    >> job.wait\n    >> job.fetchOutputs{:}\n

where parallel_example_hvy.m is the script presented above.

For the moment, jobs are hard-coded to be node jobs. This means that if you request 21 tasks instead (20 + 1) you will get a 2-node job, but only 1 core will be used on the second node. In this case you'd obviously request 40 tasks (39 + 1) instead.

For more information about Matlab's Distributed Computing features please see Matlab's HPC Portal.

"},{"location":"software/matlab/#gpu","title":"GPU","text":"

Running MATLAB with a GPU is, as of now, only possible on the Snowy and Bianca clusters. Uppsala University affiliated staff and students with an allocation on Snowy can use this resource.

Start an interactive session with at least 2 cores (otherwise MATLAB may not start). On Snowy, getting (for instance) 2 CPUs (-n 2) and 1 GPU:

interactive -A <proj> -n 2 -M snowy --gres=gpu:1  -t 3:00:00\n

On Bianca, getting 3 CPUs and 1 GPU:

interactive -A <proj> -n 3 -C gpu --gres=gpu:1 -t 01:10:00\n

Note that the wall time -t should be set to more than one hour to not automatically put the job in the devel or devcore queue, which is not allowed for GPU jobs. Also check the GPU guide for Snowy at Using the GPU nodes on Snowy.

Load the MATLAB module and start MATLAB as usual (with &) in the new session. Then test whether the GPU device is found by typing:

>> gpuDevice\n>> gpuDeviceCount\n

On Bianca you may get an error. Follow the instructions and you can run anyway. Example code:

>> A = gpuArray([1 0 1; -1 -2 0; 0 1 -1]);\n>> e = eig(A);\n

For more information about GPU computing confer the MathWorks web about GPU computing.

"},{"location":"software/matlab/#deep-learning-with-gpus","title":"Deep Learning with GPUs","text":"

For many functions in Deep Learning Toolbox, GPU support is automatic if you have a suitable GPU and Parallel Computing Toolbox\u2122. You do not need to convert your data to gpuArray. The following is a non-exhaustive list of functions that, by default, run on the GPU if available.

  • trainNetwork (Deep Learning Toolbox)

  • predict (Deep Learning Toolbox)

  • predictAndUpdateState (Deep Learning Toolbox)

  • classify (Deep Learning Toolbox)

  • classifyAndUpdateState (Deep Learning Toolbox)

  • activations (Deep Learning Toolbox)

"},{"location":"software/matlab/#shell-batch-jobs","title":"Shell batch jobs","text":"

Sometimes, when MATLAB scripts are part of workflows/pipelines, it may be easier to work directly with batch scripts.

Batch script example with 2 nodes (Rackham), matlab_submit.sh.

#!/bin/bash -l\n#SBATCH -A <proj>\n#SBATCH -p devel\n#SBATCH -N 2\n#SBATCH -n 40\nmodule load matlab/R2020b &> /dev/null\nsrun -N 2 -n 40  matlab -batch \"run('<path/to/m-script>')\"\n

Run with

sbatch matlab_submit.sh\n
"},{"location":"software/matlab/#common-problems","title":"Common problems","text":"

Sometimes things do not work out.

As a first step, try with removing local files:

rm -rf ~/.matlab\n

If the graphics is slow, try:

vglrun matlab -nosoftwareopengl\n

Unfortunately this only works from login nodes.

You may want to run MATLAB on a single thread; this often makes it work:

matlab -singleCompThread\n
"},{"location":"software/matlab/#matlab-add-ons","title":"Matlab Add-Ons","text":"

Matlab Add-ons

"},{"location":"software/matlab/#matlab-client-on-the-desktop","title":"MATLAB client on the desktop","text":"

Guideline here

"},{"location":"software/matlab_addons/","title":"Matlab Add-Ons","text":"

MATLAB Add-Ons

  • Add-ons extend the capabilities of MATLAB\u00ae by providing additional functionality for specific tasks and applications, such as:
    • connecting to hardware devices
    • additional algorithms
    • interactive apps
  • Available from:
    • MathWorks\u00ae
    • the global MATLAB user community
  • Encompass a wide variety of resources
    • products
    • apps
    • toolboxes
    • support packages
  • More information from Mathworks

Learners should be able to

  • navigate to toolboxes and Add-Ons
  • view Add-Ons and toolboxes
  • install and use Add-Ons
  • Before going into installing Add-Ons, let's get some background on the MATLAB environment and ecosystem!
"},{"location":"software/matlab_addons/#matlab-add-ons-manager","title":"MATLAB Add-Ons manager","text":"
  • In the GUI, the Add-Ons manager can be selected from the menu at the top. The drop-down menu options allow users to:

    • Browse a library of Add-Ons to download. Note that some Add-Ons require a separate license.

    • Manage Add-Ons already downloaded.

    • Package user-generated code as a Toolbox or App

    • Get hardware-related support packages

  • Here we will only focus on the first two options.

Note

Note that many packages are already included in the academic installation and license.

Some toolboxes

  • Matlab products
    • Parallel Computing Toolbox
    • MATLAB Parallel Server
    • Deep Learning Toolbox
    • Statistics and Machine Learning Toolbox
  • Simulink
    • Stateflow
    • SimEvents
    • Simscape

Some toolboxes provide a GUI for their tools, called Apps

  • Matlab products
    • Deep Network Designer - Design and visualize deep learning networks
    • Curve Fitter - Fit curves and surfaces to data
    • Deep Learning Toolbox
    • Statistics and Machine Learning Toolbox
  • Simulink
    • Stateflow
    • SimEvents
    • Simscape

  • We won't cover the usage of the toolboxes here!
"},{"location":"software/matlab_addons/#install-add-ons","title":"Install Add-Ons","text":"
  • Search in the Add-Ons explorer and install.
  • Installed Add-Ons end up in a local folder that is in the path, so they can be reached wherever you are in the file tree.

  • ~/MATLAB Add-Ons

  • It's in the path, so it should be possible to run it directly if you don't need to run an installation file.

  • For more information about a specific support package install location, see the documentation for the package.

Warning

To be able to install, you need to use the email address of a personal MathWorks account.

Seealso

You can install some Add-Ons manually using an installation file. This is useful in several situations:

  • The add-on is not available for installation through the Add-On Explorer, for example, if you create a custom add-on yourself or receive one from someone else.
  • You downloaded the add-on from the Add-On Explorer without installing it.
  • You downloaded the add-on from the File Exchange at MATLAB Central\u2122.
  • MathWorks page on getting Add-Ons

Demo

  • Search for kalmanf
  • Click \"Learning the Kalman Filter\"
  • Look at the documentation
  • Test if the command works today:
  >> kalmanf\n  Unrecognized function or variable 'kalmanf'.\n
  • OK, it is not there
  • Click \"Add\", and \"Download and Add to path\"
  • Type email address connected to your MathWorks account
  • Installation starts
  • It will end up in:
  $ tree MATLAB\\ Add-Ons/\n  MATLAB\\ Add-Ons/\n  \u2514\u2500\u2500 Collections\n  |   \u2514\u2500\u2500 Efficient\\ GRIB1\\ data\\ reader\n  |       \u251c\u2500\u2500 core.28328\n  |       \u251c\u2500\u2500 license.txt\n  |       \u251c\u2500\u2500 readGRIB1.c\n  |       \u251c\u2500\u2500 readGRIB1.mexa64\n  |       \u2514\u2500\u2500 resources\n  |           \u251c\u2500\u2500 addons_core.xml\n  |           \u251c\u2500\u2500 matlab_path_entries.xml\n  |           \u251c\u2500\u2500 metadata.xml\n  |           \u251c\u2500\u2500 previewImage.png\n  |           \u251c\u2500\u2500 readGRIB1.zip\n  |           \u2514\u2500\u2500 screenshot.png\n  \u2514\u2500\u2500 Functions\n      \u2514\u2500\u2500 Learning\\ the\\ Kalman\\ Filter\n          \u251c\u2500\u2500 kalmanf.m\n          \u2514\u2500\u2500 resources\n              \u251c\u2500\u2500 addons_core.xml\n              \u251c\u2500\u2500 kalmanf.zip\n              \u251c\u2500\u2500 matlab_path_entries.xml\n              \u251c\u2500\u2500 metadata.xml\n              \u251c\u2500\u2500 previewImage.png\n              \u2514\u2500\u2500 screenshot.png\n
  • Evidently it is a function. Note that I already have something classified as Collections.
  • Now test:
  >> kalmanf()\n  'kalmanf' requires Learning the Kalman Filter version 1.0.0.0 to be enabled.\n
  • OK. It is installed, but may need some other things to be enabled. This was just an example!

Keypoints

  • Many Add-Ons, like toolboxes and packages, are available on the clusters
  • You can view Add-Ons and toolboxes

    • It is all more or less graphical
  • To install Add-Ons

    • Search in Add-Ons explorer and install.
    • They end up in a local folder that is in the path, so they can be reached wherever you are in the file tree.
"},{"location":"software/matlab_local/","title":"MATLAB client on the desktop","text":"

Use your own computer's MATLAB

  • Would you like to try running batch jobs on the Rackham or Snowy clusters, but use the faster graphics that you can achieve on your own computer?
  • Do you have all your work locally but sometimes need the cluster to do parallel runs?
  • UPPMAX offers this now.

Warning

  • This solution is possible only if:

    • you have an UPPMAX compute project
    • a working MATLAB on your computer, with one of the versions available on the cluster:

    • check with module avail matlab

    • Examples of the newest ones:

      • R2020b
      • R2022a
      • R2022b
      • R2023a
      • R2023b
"},{"location":"software/matlab_local/#lets-get-started","title":"Let's get started","text":"

The Rackham MATLAB support package can be found at uppsala.Desktop.zip.

  • Download the ZIP file and start MATLAB locally.
  • The ZIP file should be unzipped in the location returned by calling:
>> userpath\n
  • You can unzip from MATLAB's Command window.
  • Configure MATLAB to run parallel jobs on the cluster by calling configCluster. configCluster only needs to be called once per version of MATLAB.
>> configCluster\nUsername on RACKHAM (e.g. jdoe):\n
  • Type your Rackham user name.
  • As a result:
Complete.  Default cluster profile set to \"Rackham R2022b\".\n

Note

  • To submit jobs to the local machine instead of the cluster, run the following:
>> % Get a handle to the local resources\n>> c = parcluster('local');\n
"},{"location":"software/matlab_local/#configuring-slurm-details","title":"Configuring Slurm details","text":"

Prior to submitting the job, various parameters can be assigned, such as queue, e-mail, walltime, etc. The following is a partial list of parameters; see AdditionalProperties for the complete list. Only AccountName, Partition, MemUsage and WallTime are required.

>> % Get a handle to the cluster\n>> c = parcluster;\n\nc = \n\n  Generic Cluster\n\n    Properties: \n\n                      Profile: Rackham R2022b\n                     Modified: false\n                         Host: UUC-4GM8L33.user.uu.se\n                   NumWorkers: 100000\n                   NumThreads: 1\n\n        JobStorageLocation: <path to job outputs locally>\n         ClusterMatlabRoot: /sw/apps/matlab/x86_64/R2022b\n           OperatingSystem: unix\n
  • Set some additional parameters related to Slurm on Rackham
>> % Specify the account\n>> c.AdditionalProperties.AccountName = 'naiss2024-22-1202';\n\n>> % Specify the wall time (e.g., 1 day, 5 hours, 30 minutes)\n>> c.AdditionalProperties.WallTime = '00:30:00';\n\n>> % Specify cores per node\n>> c.AdditionalProperties.ProcsPerNode = 20;\n\n[OPTIONAL]\n\n>> % Specify the partition\n>> c.AdditionalProperties.Partition = 'devcore';\n\n>> % Specify another cluster: 'snowy'\n>> c.AdditionalProperties.ClusterName='snowy'\n>> c.AdditionalProperties.ProcsPerNode = 16;\n\n>> % Specify number of GPUs\n>> c.AdditionalProperties.GPUsPerNode = 1;\n>> c.AdditionalProperties.GPUCard = 'gpu-card';\n
  • Save the profile
>> c.saveProfile\n

To see the values of the current configuration options, display AdditionalProperties.

>> % To view current properties\n>> c.AdditionalProperties\n

Unset a value when no longer needed.

>> % Example Turn off email notifications\n>> c.AdditionalProperties.EmailAddress = '';\n>> c.saveProfile\n
"},{"location":"software/matlab_local/#start-job","title":"Start job","text":"
  • Copy this script and paste it into a new file parallel_example_local.m that you save in your current working directory (check with pwd in the MATLAB Command Window).

    • The script is supposed to loop over sleepTime seconds of work nLoopIters times.
    • We will define the number of processes in the batch submit line.
   function t = parallel_example_local(nLoopIters, sleepTime)\n   t0 = tic;\n   parfor idx = 1:nLoopIters\n      A(idx) = idx;\n      pause(sleepTime);\n   end\n   t = toc(t0);\n
>> job = c.batch(@parallel_example_local, 1, {16,1}, 'Pool',8,'CurrentFolder','.');\n\n- Submission to the cluster requires SSH credentials. \n- You will be prompted for username and password or identity file (private key). \n    - It will not ask again until you define a new cluster handle ``c`` or in next session.\n

  • Jobs will now default to the cluster rather than submit to the local machine.
>> job.State\n\nans =\n\n    'running'\n
  • You can run this several times until it gives:
>> job.State\n\nans =\n\n    'finished'\n
  • You can also watch queue

  • Or on Rackham (it really runs there!):
[bjornc2@rackham2 ~]$ squeue -u bjornc2\n        JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n        50827312   devcore MATLAB_R  bjornc2  R       2:20      1 r483\n
>> job.fetchOutputs{:}\n\nans =\n\n    2.4853\n
  • The script looped over 1 s of work 16 times, but with 8 processes.
  • In an ideal world it would have taken 16 / 8 = 2 s. Here it took 2.5 s, due to some \"overhead\".

Run on Snowy

>> c.AdditionalProperties.ClusterName='snowy'\n>> c.AdditionalProperties.ProcsPerNode = 16;\n

Keypoints

  • Steps to configure
    • The first time: download and decompress the UPPMAX configuration file.
    • Run configCluster in your local MATLAB and set your user name.
  • Steps to run
    • Set parcluster settings, like you would otherwise.
  • Note: only parcluster will work, not parpool.
"},{"location":"software/metontiime/","title":"MetONTIIME","text":"

MetONTIIME is a Nextflow pipeline that is not part of nf-core.

It is not installed as a module.
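
Since it is a regular Nextflow pipeline, one possible way to run it is with the Nextflow module (a sketch, assuming you clone the pipeline's GitHub repository yourself; the main script and config names are placeholders, check the repository for the actual ones):

module load bioinfo-tools Nextflow\ngit clone https://github.com/MaestSi/MetONTIIME.git\nnextflow run MetONTIIME/<main-script>.nf -c <your-config> ...\n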

User tickets (for UPPMAX staff)

ticket_287014

"},{"location":"software/metontiime/#links","title":"Links","text":"
  • MetONTIIME GitHub repository
"},{"location":"software/mobaxterm/","title":"MobaXterm","text":"

There are multiple SSH clients. This page describes the MobaXterm SSH client.

MobaXterm is an SSH client that is easy to use and install for Windows. When MobaXterm is started, start a terminal to run ssh. The usage of ssh is described at the UPPMAX page on ssh here.
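
For example, in a MobaXterm terminal you can connect to Rackham with (replace <username> with your UPPMAX user name):

ssh <username>@rackham.uppmax.uu.se\n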

In MobaXterm you can use the internal MobAgent and/or Pageant from the PuTTY tools.

  • MobaXterm homepage
"},{"location":"software/multiqc/","title":"MultiQC","text":"

MultiQC is a tool with homepage https://github.com/ewels/MultiQC.

MultiQC can be found among the UPPMAX modules.

module spider MultiQC\n
What does that look like?

Your output will look similar to this:

[sven@rackham2 ~]$ module spider MultiQC\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  MultiQC:\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n     Versions:\n        MultiQC/0.6\n        MultiQC/0.7\n        MultiQC/0.8\n        MultiQC/0.9\n        MultiQC/1.0\n        MultiQC/1.2\n        MultiQC/1.3\n        MultiQC/1.5\n        MultiQC/1.6\n        MultiQC/1.7\n        MultiQC/1.8\n        MultiQC/1.9\n        MultiQC/1.10\n        MultiQC/1.10.1\n        MultiQC/1.11\n        MultiQC/1.12\n        MultiQC/1.22.2\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  For detailed information about a specific \"MultiQC\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider MultiQC/1.22.2\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n

To find out how to load a specific version:

module spider MultiQC/1.22.2\n
What does that look like?

Output will look similar to:

[sven@rackham2 ~]$ module spider MultiQC/1.22.2\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  MultiQC: MultiQC/1.22.2\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n    You will need to load all module(s) on any one of the lines below before the \"MultiQC/1.22.2\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n       MultiQC - use MultiQC 1.22.2\n\n       Version 1.22.2\n\n\n      Version 1.22.2 is installed using python/3.8.7\n

After reading that documentation, we know how to load it:

module load bioinfo-tools \nmodule load MultiQC/1.22.2\n
What does that look like?

Your output will look similar to this:

[sven@rackham2 ~]$ module load bioinfo-tools \n[sven@rackham2 ~]$ module load MultiQC/1.22.2\n[sven@rackham2 ~]$ \n
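
Once loaded, MultiQC scans a directory of analysis logs and writes an HTML report. A typical invocation (not UPPMAX-specific) on the current directory is:

multiqc .\n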
"},{"location":"software/multiqc/#singularity-script","title":"Singularity script","text":"

If you want to put MultiQC in a Singularity container, here is an example script:

BootStrap: library\nFrom: ubuntu:18.04\n\n%runscript\n  multiqc \"$@\"\n\n%post\n  echo \"Hello from inside the container\"\n  apt-get update\n  apt-get -y dist-upgrade\n  apt-get clean\n  apt-get -y install python-pip\n  pip install multiqc\n

See the documentation on Singularity for how to do so.
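
As a sketch, assuming the recipe above is saved as multiqc.def, the container can be built and run like this (building generally requires root, so do it on your own machine or with a remote builder):

sudo singularity build multiqc.sif multiqc.def\nsingularity run multiqc.sif --version\n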

"},{"location":"software/nano/","title":"nano","text":"

Using nano to edit the file my_file.txt.

UPPMAX has multiple text editors available. This page describes the GNU nano text editor.

Want to see a video?

You can find a video on using nano on Rackham here

GNU nano is a simple terminal text editor that is easy to learn.

"},{"location":"software/nano/#starting-nano","title":"Starting nano","text":"

Start nano on a terminal with:

nano\n

To start nano to edit a file (for example, my_file.txt), use:

nano my_file.txt\n
"},{"location":"software/nano/#using-nano","title":"Using nano","text":"

The keyboard shortcuts are shown on-screen, where ^ denotes Ctrl and M the meta key.

OS specifics:

  • On Windows, Alt is the meta key
  • On Mac: in the Terminal.app, go to 'Preferences -> Settings -> Keyboard' and turn on \"Use option as meta key\", after which Alt is the meta key

Common tasks:

  • Save a file: CTRL + O , then edit the filename and press enter
  • Exit: CTRL + X, press \"y\" or \"n\" on some questions and/or press Enter to confirm.
  • Help: CTRL + G

More tips can be found at the nano cheat sheet.

"},{"location":"software/nextflow/","title":"nextflow & nf-core on UPPMAX","text":"

https://www.nextflow.io

  • Official documentation: https://www.nextflow.io/docs/latest/index.html
"},{"location":"software/nextflow/#nextflow-from-the-module-system","title":"nextflow from the module system","text":"
  • latest nextflow
module load bioinfo-tools\nmodule load Nextflow/latest  # this also loads java as a requirement\n\nnextflow -v\nnextflow version 24.04.4.5917\n
  • alternative versions
export NXF_VER=23.10.1\n\nnextflow -v\nnextflow version 23.10.1.5891\n
# To check the available versions on Rackham and Bianca\nls /sw/bioinfo/Nextflow/latest/rackham/nxf_home/framework/\n20.04.1  20.10.0  21.10.6  22.10.1  22.10.3  22.10.8  23.04.2  23.04.4  23.10.1  24.04.2  24.04.4\n20.07.1  21.04.3  22.10.0  22.10.2  22.10.4  23.04.1  23.04.3  23.10.0  24.04.1  24.04.3\n
"},{"location":"software/nextflow/#nf-core-from-the-module-system","title":"nf-core from the module system","text":"

https://nf-co.re

nf-core and all other required modules are available on the transit server as well.

module load bioinfo-tools\nmodule load nf-core   # this also loads Nextflow and java as requirements\n
"},{"location":"software/nextflow/#nf-core-pipelines-on-bianca","title":"nf-core pipelines on Bianca","text":"
  1. Login to transit.uppmax.uu.se - documentation
  2. Mount the wharf of your project.

    user@transit:~$ mount_wharf sens2023531\nMounting wharf (accessible for you only) to /home/<user>/sens2023531\n<user>-sens2023531@bianca-sftp.uppmax.uu.se's password: \n
  3. Navigate to your wharf folder

  4. Disable Singularity cache

    export SINGULARITY_DISABLE_CACHE=true\nexport APPTAINER_DISABLE_CACHE=true\nunset NXF_SINGULARITY_CACHEDIR\n
  5. Load nf-core software module

    module load uppmax bioinfo-tools nf-core\n
  6. Run nf-core to download the pipeline.

    nf-core download pixelator\n                                      ,--./,-.\n      ___     __   __   __   ___     /,-._.--~\\\n|\\ | |__  __ /  ` /  \\ |__) |__         }  {\n| \\| |       \\__, \\__/ |  \\ |___     \\`-._,-`-,\n                                      `._,._,'\n\nnf-core/tools version 2.11.1 - https://nf-co.re\n\nWARNING  Could not find GitHub authentication token. Some API requests may fail.                                                    \n? Select release / branch: 1.0.2  [release]\n? Include the nf-core's default institutional configuration files into the download? Yes\n\nIn addition to the pipeline code, this tool can download software containers.\n? Download software container images: singularity\n\nNextflow and nf-core can use an environment variable called $NXF_SINGULARITY_CACHEDIR that is a path to a directory where remote \nSingularity images are stored. This allows downloaded images to be cached in a central location.\n? Define $NXF_SINGULARITY_CACHEDIR for a shared Singularity image download folder? [y/n]: n\n\nIf transferring the downloaded files to another system, it can be convenient to have everything compressed in a single file.\nThis is not recommended when downloading Singularity images, as it can take a long time and saves very little space.\n? Choose compression type: none\nINFO     Saving 'nf-core/pixelator'                                                                                                 \n          Pipeline revision: '1.0.2'                                                                                                \n          Use containers: 'singularity'                                                                                             \n          Container library: 'quay.io'                                                                                              \n          Output directory: 'nf-core-pixelator_1.0.2'                                                                               \n          Include default institutional configuration: 'True'                                                                       \nINFO     Downloading centralised configs from GitHub                                                                                \nINFO     Downloading workflow files from GitHub                                                                                     \nINFO     Processing workflow revision 1.0.2, found 4 container images in     total.\nDownloading singularity images ???????????????????????????????????????????????????????????????????????????????? 100% ? 4/4 completed\n
  7. Running on Bianca

    module load bioinfo-tools Nextflow\nnextflow run ... -profile uppmax --project sens-XXXX-XX .... \n

Note: you might need -c configs/conf/uppmax.config; make sure you have the file (it is an option to download it during the pipeline download process). https://github.com/nf-core/configs/blob/master/conf/uppmax.config https://nf-co.re/configs/uppmax
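
Putting it together, a full run command could look like this (a sketch; the pipeline path, config path, samplesheet and project ID are placeholders to be replaced with your own):

nextflow run <path/to/downloaded/pipeline> -profile uppmax --project sens-XXXX-XX -c <path/to>/uppmax.config --input <samplesheet> --outdir <outdir>\n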

"},{"location":"software/nextflow/#common-problems","title":"Common problems","text":"
  • Task is running out of resources (memory or time)

Add lines to your configuration that overrides the settings for the problematic task, for example:

process {\n    withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' {\n        cpus   = 12\n        memory = '72.GB'\n        time   = '24.h'\n    }\n}\n

More: https://www.nextflow.io/docs/latest/config.html#process-selectors

"},{"location":"software/nextflow/#troubleshooting-nf-core","title":"Troubleshooting - nf-core","text":""},{"location":"software/nvidia-deep-learning-frameworks/","title":"NVIDIA Deep Learning Frameworks","text":"

Here is how easily one can use an NVIDIA environment for deep learning, with all of the following tools preset.

First, pull the container (6.5 GB):

singularity pull docker://nvcr.io/nvidia/pytorch:22.03-py3\n

Get an interactive shell.

singularity shell --nv ~/external_1TB/tmp/pytorch_22.03-py3.sif\n\nSingularity> python3\nPython 3.8.12 | packaged by conda-forge | (default, Jan 30 2022, 23:42:07)\n[GCC 9.4.0] on linux\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\n>>> import torch\n# Check torch version\n>>> print(torch.__version__)\n1.12.0a0+2c916ef\n\n# Check if CUDA is available\n>>> print(torch.cuda.is_available())\nTrue\n\n# Check which GPU architectures are supported\n>>> print(torch.cuda.get_arch_list())\n['sm_52', 'sm_60', 'sm_61', 'sm_70', 'sm_75', 'sm_80', 'sm_86', 'compute_86']\n\n# test torch\n>>> torch.zeros(1).to('cuda')\ntensor([0.], device='cuda:0')\n

From the container shell, check what else is available...

Singularity> nvcc -V\nnvcc: NVIDIA (R) Cuda compiler driver\nCopyright (c) 2005-2022 NVIDIA Corporation\nBuilt on Thu_Feb_10_18:23:41_PST_2022\nCuda compilation tools, release 11.6, V11.6.112\nBuild cuda_11.6.r11.6/compiler.30978841_0\n\n# Check what conda packages are already there\nSingularity> conda list -v\n\n# Start a jupyter-lab (keep in mind the hostname)\nSingularity> jupyter-lab\n...\n[I 13:35:46.270 LabApp] [jupyter_nbextensions_configurator] enabled 0.4.1\n[I 13:35:46.611 LabApp] jupyter_tensorboard extension loaded.\n[I 13:35:46.615 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.8/site-packages/jupyterlab\n[I 13:35:46.615 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab\n[I 13:35:46.616 LabApp] [Jupytext Server Extension] NotebookApp.contents_manager_class is (a subclass of) jupytext.TextFileContentsManager already - OK\n[I 13:35:46.616 LabApp] Serving notebooks from local directory: /home/pmitev\n[I 13:35:46.616 LabApp] Jupyter Notebook 6.4.8 is running at:\n[I 13:35:46.616 LabApp] http://hostname:8888/?token=d6e865a937e527ff5bbccfb3f150480b76566f47eb3808b1\n[I 13:35:46.616 LabApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).\n...\n

You can use this container as a base to add more packages:

Bootstrap: docker\nFrom: nvcr.io/nvidia/pytorch:22.03-py3\n...\n
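
For example, a minimal definition file that extends the image with one extra Python package (scikit-learn here is only an illustration) could look like:

Bootstrap: docker\nFrom: nvcr.io/nvidia/pytorch:22.03-py3\n\n%post\n    # add packages on top of the NVIDIA stack; avoid reinstalling torch itself (see below)\n    pip install --no-cache-dir scikit-learn\n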

Just keep in mind that \"upgrading\" the built-in torch package might install a package that is compatible with fewer GPU architectures, and it might not work anymore on your hardware.

Singularity> python3 -c \"import torch; print(torch.__version__); print(torch.cuda.is_available()); print(torch.cuda.get_arch_list()); torch.zeros(1).to('cuda')\"\n\n1.10.0+cu102\nTrue\n['sm_37', 'sm_50', 'sm_60', 'sm_70']\nNVIDIA A100-PCIE-40GB with CUDA capability sm_80 is not compatible with the current PyTorch installation.\nThe current PyTorch install supports CUDA capabilities sm_37 sm_50 sm_60 sm_70.\n
"},{"location":"software/openmolcas/","title":"MOLCAS user guide","text":"

How to run the program MOLCAS on UPPMAX

"},{"location":"software/openmolcas/#information","title":"Information","text":"

MOLCAS is an ab initio computational chemistry program. The focus of the program is on methods for calculating general electronic structures in molecular systems, in both ground and excited states. MOLCAS is, in particular, designed to study the potential surfaces of excited states.

This guide will help you get started running MOLCAS on UPPMAX. More detailed information on how to use Molcas can be found on the official website.

"},{"location":"software/openmolcas/#licensing","title":"Licensing","text":"

A valid license key is required to run MOLCAS on UPPMAX. The license key should be kept in a directory named .Molcas under your home directory.

Molcas is currently free of charge for academic researchers active in the Nordic countries. You can get hold of a license by following these instructions.
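
As a sketch, placing the key could look like this (the file name license.dat is an assumption; use the file you actually receive):

mkdir -p ~/.Molcas\ncp license.dat ~/.Molcas/\n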

"},{"location":"software/openmolcas/#versions-installed-at-uppmax","title":"Versions installed at UPPMAX","text":"

At UPPMAX the following versions are installed:

  • 8.0 (serial)
  • 7.8 (serial)
"},{"location":"software/openmolcas/#modules-needed-to-run-molcas","title":"Modules needed to run MOLCAS","text":"

In order to run MOLCAS you must first load the molcas module. You can see all available versions of MOLCAS installed at UPPMAX with:

module avail molcas\n

Load a MOLCAS module with, e.g.:

module load molcas/7.8.082\n
"},{"location":"software/openmolcas/#how-to-run-molcas-interactively","title":"How to run MOLCAS interactively","text":"

If you would like to do tests or short runs, we recommend using the interactive command:

interactive -A your_project_name\n

This will reserve a node for you to do your test on. Note that you must provide the name of an active project in order to run on UPPMAX resources. After a short wait you will get access to the node. Then you can run MOLCAS by:

module load molcas/7.8.082\nmolcas -f test000.input\n

The test000.input looks like:

*$Revision: 7.7 $\n************************************************************************\n* Molecule: H2\n* Basis: DZ\n* Symmetry: x y z\n* SCF: conventional\n*\n*  This is a test to be run during first run to verify\n*  that seward and scf works at all\n*\n\n>export MOLCAS_PRINT=VERBOSE\n &GATEWAY\ncoord\n2\nangstrom\nH  0.350000000  0.000000000  0.000000000\nH -0.350000000  0.000000000  0.000000000\nbasis\nH.DZ....\n\n &SEWARD\n\n &SCF\nTitle\n H2, DZ Basis set\n\n &RASSCF\nTitle\n H2, DZ Basis set\nnActEl\n 2  0 0\nRas2\n 1 1 0 0 0 0 0 0\n\n &ALASKA\n\n &SLAPAF\n\n &CASPT2\n

See the Slurm user guide for more information on the interactive command. Don't forget to exit your interactive job when you have finished your calculation. Exiting will free the resource for others to use.

"},{"location":"software/openmolcas/#batch-scripts-for-slurm","title":"Batch scripts for Slurm","text":"

It's possible to run MOLCAS in the batch queue. Here is an example running MOLCAS on one core:

#!/bin/bash -l\n#\n#SBATCH -A your_project_name\n#SBATCH -J molcastest\n#SBATCH -t 00:10:00\n#SBATCH -p core -n 1\n\nmodule load molcas/7.8.082\n\n#In order to let MOLCAS use more memory\nexport MOLCASMEM=2000\n\nmolcas -f test000.input\n

Again you'll have to provide your project name.

If the script is called test000.job you can submit it to the batch queue with:

sbatch test000.job\n
"},{"location":"software/orthofinder/","title":"OrthoFinder","text":"

'OrthoFinder is a software program for phylogenetic orthology inference' (from OrthoFinder tutorials).

It is not installed via the module system.

For UPPMAX staff

Tickets:

  • https://support.naiss.se/Ticket/Display.html?id=293272
"},{"location":"software/orthofinder/#links","title":"Links","text":"
  • OrthoFinder GitHub repository
  • OrthoFinder tutorials
"},{"location":"software/overview/","title":"Software","text":"

At the UPPMAX clusters, a lot of software is pre-installed and accessible via the module system.

What are the UPPMAX clusters?

See the UPPMAX documentation on its clusters here

What is the module system?

See the UPPMAX documentation on modules here

"},{"location":"software/overview/#software-table","title":"Software table","text":"

Automatically updated software table

"},{"location":"software/overview/#conflicting-modules","title":"Conflicting modules","text":"
  • Conflicting modules
"},{"location":"software/overview/#reach-the-bioinformatics-tools","title":"Reach the Bioinformatics tools","text":"
  • Before you can list available bioinformatics tools you need to issue the command:
module load bioinfo-tools\n
  • When you list available modules with module avail after this, you will see that the bioinformatics tools are now also available in the listing.

  • Note that the module spider command will show bioinformatics modules regardless of whether you have loaded the bioinfo-tools module.

  • This command can also tell you whether a particular module requires the bioinfo-tools module, e.g. \"module spider GEMINI/0.18.3\".
"},{"location":"software/overview/#how-can-i-request-new-software-to-be-installed","title":"How can I request new software to be installed?","text":"

You can always install software in your home directory on any UPPMAX system. If many users request the same software, it can be installed by UPPMAX application or system experts.

Please send such requests to support@uppmax.uu.se.

"},{"location":"software/overview/#installing-yourself","title":"Installing yourself","text":"

Go to our installation page

"},{"location":"software/parallel_comb/","title":"Combinations of parallel libraries and compilers","text":"

Before compiling a program for MPI we must choose, in addition to the compiler, which MPI implementation we want to use. At UPPMAX there are two: openmpi and intelmpi. These, with their versions, are compatible with only a subset of the gcc and intel compiler versions. The lists below summarise the best choices.
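
For example, picking one of the compatible pairs listed below, compiling and running an MPI program could look like this (hello.c stands in for your own source file):

module load gcc/10.3.0 openmpi/3.1.6\nmpicc -O2 hello.c -o hello\nmpirun -np 4 ./hello\n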

"},{"location":"software/parallel_comb/#suggestions-for-compatibility-rackham-snowy-bianca","title":"Suggestions for compatibility Rackham, Snowy, Bianca","text":""},{"location":"software/parallel_comb/#gcc","title":"GCC","text":"
  • v5: gcc/5.3.0 openmpi/1.10.3
  • v6: gcc/6.3.0 openmpi/2.1.0
  • v7: gcc/7.4.0 openmpi/3.1.3
  • v8: gcc/8.3.0 openmpi/3.1.3
  • v9: gcc/9.3.0 openmpi/3.1.5
  • v10: gcc/10.3.0 openmpi/3.1.6 or openmpi/4.1.0
  • v11: gcc/11.2.0 openmpi/4.1.1 will work also on Miarka
  • v12: gcc/12.2.0 openmpi/4.1.4
  • v13: gcc/13.2.0 openmpi/4.1.5
"},{"location":"software/parallel_comb/#intel","title":"Intel","text":"
  • v18: intel/18.3 openmpi/3.1.3
  • v20: intel/20.4 openmpi/3.1.6 or openmpi/4.0.4
"},{"location":"software/parallel_comb/#intel-intelmpi","title":"Intel & intelmpi","text":"
  • Load the corresponding version of intelmpi as of the intel compiler (versions up to 20.4)
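
A sketch, assuming the intelmpi module follows the same version numbering as the compiler (check with module avail intelmpi):

module load intel/20.4 intelmpi/20.4\nmpiicc -O2 hello.c -o hello\n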
"},{"location":"software/parallel_comb/#intel-after-version-204","title":"Intel after version 20.4","text":"
  • For all versions of intel from 2021 onwards, there is not necessarily an MPI library with the same version as the compiler.
module load intel-oneapi\n
  • Check availability and load desired version
module avail mpi  # showing both compilers and mpi ;-)\n
  • Example:
module load compiler/2023.1.0 mpi/2021.9.0    \n
"},{"location":"software/parallel_comb/#suggestions-for-compatibility-rackham-and-snowy","title":"Suggestions for compatibility Rackham and Snowy","text":"
  • GCC

    • v4: gcc/4.8.2 openmpi/1.7.4
    • v5: gcc/5.3.0 openmpi/1.10.3
    • v6: gcc/6.3.0 openmpi/2.1.0
    • v7: gcc/7.4.0 openmpi/3.1.3
    • v8: gcc/8.3.0 openmpi/3.1.3
    • v9: gcc/9.3.0 openmpi/3.1.3 or openmpi/4.0.3
    • v10: gcc/10.3.0 openmpi/3.1.6 or openmpi/4.1.1
    • v11: gcc/11.3.0 openmpi/4.1.2
    • v12: gcc/12.2.0 openmpi/4.1.4
    • v13: gcc/13.1.0 openmpi/4.1.5
  • Intel

    • v18: intel/18.3 openmpi/3.1.3
    • v20: intel/20.4 openmpi/3.1.6 or openmpi/4.1.1
"},{"location":"software/parallel_comb/#rackham","title":"Rackham","text":"
  • Also on Snowy in italic
  • Also on Snowy AND Bianca in bold
GCC openmpi 4.8.2 1.7.4 5.2.0 1.8.8 5.3.0 1.10.1 5.5.0 1.10.3 6.3.0 2.0.1, 2.0.2, 2.1.0 6.4.0 2.1.1 7.1.0 2.1.0, 2.1.1 7.2.0 2.1.1, 2.1.2, 3.0.0 7.3.0 2.1.3, 3.0.0, 3.1.0 7.4.0 3.1.3 8.1.0 3.0.1, 3.1.0 8.2.0 3.0.2, 3.1.0, 3.1.1, 3.1.2, 3.1.3, 4.0.0 8.3.0 3.1.3 8.4.0 3.1.5, 4.0.2 9.1.0 3.1.3 9.2.0 3.1.3, 3.1.4, 3.1.5, 4.0.2 9.3.0 3.1.5, 4.0.2, 4.0.3 10.1.0 3.1.6, 4.0.3 10.2.0 3.1.6, 4.0.4, 4.1.0 10.3.0 3.1.6, 4.0.5, 4.1.0, 4.1.1 11.2.0 4.1.1, 4.1.2 11.3.0 4.1.2, 4.1.3 12.1.0 4.1.3 12.2.0 4.1.3, 4.1.4 12.3.0 4.1.5 13.1.0 4.1.5 Intel openmpi 15.3 1.10.0, 1.10.1, 2.1.0 16.1 1.10.1, 1.10.2 17.1 2.0.1, 2.0.2, 17.2 2.0.2, 2.1.0 17.4 2.1.1, 3.0.0 18.0 3.0.0 18.1 2.1.2, 2.1.3, 3.0.0 18.2 2.1.3, 3.0.0, 3.1.0 18.3 3.0.2, 3.1.0, 3.1.1, 3.1.2, 3.1.3 19.4 3.1.4 19.5 3.1.4 20.0 3.1.5, 3.1.6, 4.0.3, 4.0.4 20.2 3.1.6, 4.0.4 20.4 3.1.6, 4.0.4, 4.1.0, 4.1.1 openmpi gcc intel pgi 1.7.4 4.8.2 - - 1.8.8 5.2.0 - - 1.10.0 15.3 - 1.10.1 5.3.0 15.3, 16.1 - 1.10.2 16.1 16.9, 17.4, 17.7, 17.10 1.10.3 5.5.0 - - 2.0.1 6.3.0 17.1 - 2.0.2 6.3.0 17.1, 17.2 - 2.1.0 6.3.0, 7.1.0 15.3, 17.2 - 2.1.1 6.4.0, 7.1.0, 7.2.0 17.4 17.4, 17.7 2.1.2 7.2.0 18.1 17.10, 18.1, 18.3 2.1.3 7.3.0 18.1, 18.2 18.1 3.0.0 7.2.0, 7.3.0 17.4, 18.0, 18.1, 18.2 17.7, 17.10, 18.0 18.1 3.0.1 8.1.0 - - 3.0.2 8.2.0 18.3 - 3.1.0 7.3.0, 8.1.0, 8.2.0 18.2, 18.3 18.3 3.1.1 8.2.0 18.3 - 3.1.2 8.2.0 18.3 18.3 3.1.3 7.4.0, 8.2.0, 8.3.0, 9.1.0, 9.2.0 18.3 18.3 3.1.4 9.2.0 19.4, 19.5 - 3.1.5 8.4.0, 9.2.0, 9.3.0 20.0 - 3.1.6 10.1.0, 10.2.0, 10.3.0 20.0, 20.2, 20.4 - 4.0.0 8.2.0 - 4.0.2 8.4.0, 9.2.0, 9.3.0 - 4.0.3 9.3.0, 10.1.0 20.0 - 4.0.4 10.2 20.0, 20.2, 20.4 - 4.0.5 10.3.0 - - 4.1.0 10.2.0, 10.3.0 20.4 - 4.1.1 10.3.0, 11.2.0 20.4 - 4.1.2 11.2.0 - - 4.1.3 12.1.0, 12.2.0 - - 4.1.4 12.2.0 - - 4.1.5 12.3.0, 13.1.0 - -"},{"location":"software/parallel_comb/#bianca","title":"Bianca","text":"GCC openmpi 5.3.0 1.10.1 5.4.0 2.0.0, 2.0.1 6.1.0 2.0.0, 2.0.1 6.2.0 2.0.1 6.3.0 2.0.1, 2.0.2, 2.1.0 6.4.0 2.1.1 7.1.0 2.1.0, 2.1.1 7.2.0 2.1.1, 3.0.0 7.3.0 3.0.0 8.1.0 3.1.0 8.2.0 3.1.2, 3.1.3 8.3.0 3.1.3 9.3.0 3.1.5 10.1.0 3.1.6 10.2.0 4.1.0 10.3.0 3.1.6, 4.0.5, 4.1.0 11.2.0 4.1.1 Intel openmpi 15.3 1.10.0, 1.10.1 16.1 1.10.1, 1.10.2 16.3 2.0.0, 2.0.1 17.0 2.0.1 17.1 2.0.1, 2.0.2 17.2 2.0.2, 2.1.0 17.4 2.1.1, 3.0.0 18.3 3.1.2, 3.1.3 20.2 3.1.6, 4.0.4 20.4 3.1.6, 4.0.4"},{"location":"software/perl/","title":"Perl","text":""},{"location":"software/perl/#perl_modules-guide","title":"Perl_modules guide","text":"

A number of modules/packages are available by default with all Perl versions.

This is a list of modules for perl/5.26.2 available by loading the module perl_modules/5.26.2.

For previous Perl versions 5.18.4 and 5.24.1 (available through the software module system as perl/5.18.4 and perl/5.24.1), many more Perl modules are available by loading the software module perl_modules/5.18.4 or perl_modules/5.24.1.
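
To use one of these modules, load the matching perl and perl_modules software modules, then use the Perl module as usual. For example, with DateTime (which is in the list below):

module load perl/5.26.2 perl_modules/5.26.2\nperl -MDateTime -e 'print DateTime->now->ymd'\n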

A complete list of the Perl modules available in the perl_modules/5.26.2 module is as follows:

"},{"location":"software/perl/#perl-module-search-on-perl_modules5262rackham","title":"Perl Module Search on perl_modules/5.26.2/rackham","text":"Module name Version $pkg 2.019 Acme::Damn 0.08 Algorithm::C3 0.10 Algorithm::Combinatorics 0.27 Algorithm::Diff 1.1903 Algorithm::FastPermute 0.999 Algorithm::Loops 1.032 Algorithm::Munkres 0.08 Algorithm::Permute 0.16 aliased 0.34 Apache::Htpasswd 1.9 Apache::LogFormat::Compiler 0.35 Apache::SOAP 1.27 App::Ack 2.24 App::Ack::ConfigDefault unknown App::Ack::ConfigFinder unknown App::Ack::ConfigLoader unknown App::Ack::Filter unknown App::Ack::Filter::Collection unknown App::Ack::Filter::Default unknown App::Ack::Filter::Extension unknown App::Ack::Filter::ExtensionGroup unknown App::Ack::Filter::FirstLineMatch unknown App::Ack::Filter::Inverse unknown App::Ack::Filter::Is unknown App::Ack::Filter::IsGroup unknown App::Ack::Filter::IsPath unknown App::Ack::Filter::IsPathGroup unknown App::Ack::Filter::Match unknown App::Ack::Filter::MatchGroup unknown App::Ack::Resource unknown App::Ack::Resources unknown App::Cmd 0.331 App::Cmd::ArgProcessor 0.331 App::Cmd::Command 0.331 App::Cmd::Command::commands 0.331 App::Cmd::Command::help 0.331 App::Cmd::Command::version 0.331 App::Cmd::Plugin 0.331 App::Cmd::Setup 0.331 App::Cmd::Simple 0.331 App::Cmd::Subdispatch 0.331 App::Cmd::Subdispatch::DashedStyle 0.331 App::Cmd::Tester 0.331 App::Cmd::Tester::CaptureExternal 0.331 App::cpanminus 1.7044 App::cpanminus::fatscript 1.7044 App::FatPacker 0.010007 App::FatPacker::Trace unknown App::Nopaste 1.012 App::Nopaste::Command 1.012 App::Nopaste::Service 1.012 App::Nopaste::Service::Codepeek 1.012 App::Nopaste::Service::Debian 1.012 App::Nopaste::Service::Gist 1.012 App::Nopaste::Service::GitLab 1.012 App::Nopaste::Service::Mojopaste 1.012 App::Nopaste::Service::PastebinCom 1.012 App::Nopaste::Service::Pastie 1.012 App::Nopaste::Service::Shadowcat 1.012 App::Nopaste::Service::Snitch 1.012 App::Nopaste::Service::ssh 1.012 App::Nopaste::Service::Ubuntu 1.012 App::perlbrew 0.84 App::Pinto 0.14 App::Pinto::Command 0.14 App::Pinto::Command::add 0.14 App::Pinto::Command::clean 0.14 App::Pinto::Command::copy 0.14 App::Pinto::Command::default 0.14 App::Pinto::Command::delete 0.14 App::Pinto::Command::diff 0.14 App::Pinto::Command::help 0.14 App::Pinto::Command::init 0.14 App::Pinto::Command::install 0.14 App::Pinto::Command::kill 0.14 App::Pinto::Command::list 0.14 App::Pinto::Command::lock 0.14 App::Pinto::Command::log 0.14 App::Pinto::Command::look 0.14 App::Pinto::Command::manual 0.14 App::Pinto::Command::merge 0.14 App::Pinto::Command::migrate 0.14 App::Pinto::Command::new 0.14 App::Pinto::Command::nop 0.14 App::Pinto::Command::pin 0.14 App::Pinto::Command::props 0.14 App::Pinto::Command::pull 0.14 App::Pinto::Command::register 0.14 App::Pinto::Command::rename 0.14 App::Pinto::Command::reset 0.14 App::Pinto::Command::revert 0.14 App::Pinto::Command::roots 0.14 App::Pinto::Command::stacks 0.14 App::Pinto::Command::statistics 0.14 App::Pinto::Command::thanks 0.14 App::Pinto::Command::unlock 0.14 App::Pinto::Command::unpin 0.14 App::Pinto::Command::unregister 0.14 App::Pinto::Command::update 0.14 App::Pinto::Command::verify 0.14 App::Prove 3.42 App::Prove::State 3.42 App::Prove::State::Result 3.42 App::Prove::State::Result::Test 3.42 AppConfig 1.71 AppConfig::Args 1.71 AppConfig::CGI 1.71 AppConfig::File 1.71 AppConfig::Getopt 1.71 AppConfig::State 1.71 AppConfig::Sys 1.71 Archive::Any::Create 0.03 Archive::Any::Create::Tar unknown Archive::Any::Create::Zip unknown 
Archive::Extract 0.80 Archive::Zip 1.60 Archive::Zip::Archive 1.60 Archive::Zip::BufferedFileHandle 1.60 Archive::Zip::DirectoryMember 1.60 Archive::Zip::FileMember 1.60 Archive::Zip::Member 1.60 Archive::Zip::MemberRead 1.60 Archive::Zip::MockFileHandle 1.60 Archive::Zip::NewFileMember 1.60 Archive::Zip::StringMember 1.60 Archive::Zip::Tree 1.60 Archive::Zip::ZipFileMember 1.60 Array::Compare 3.0.1 Array::Unique 0.08 Array::Utils 0.5 asa 1.03 Astro::FITS::Header 3.04 Astro::FITS::Header::AST 3.01 Astro::FITS::Header::CFITSIO 3.02 Astro::FITS::Header::GSD 3.01 Astro::FITS::Header::Item 3.02 Astro::FITS::Header::NDF 3.02 Authen::SASL 2.16 Authen::SASL::CRAM_MD5 2.14 Authen::SASL::EXTERNAL 2.14 Authen::SASL::Perl 2.14 Authen::SASL::Perl::ANONYMOUS 2.14 Authen::SASL::Perl::CRAM_MD5 2.14 Authen::SASL::Perl::DIGEST_MD5 2.14 Authen::SASL::Perl::EXTERNAL 2.14 Authen::SASL::Perl::GSSAPI 0.05 Authen::SASL::Perl::LOGIN 2.14 Authen::SASL::Perl::PLAIN 2.14 Authen::Simple 0.5 Authen::Simple::Adapter unknown Authen::Simple::Apache unknown Authen::Simple::Log unknown Authen::Simple::Passwd 0.6 Authen::Simple::Password unknown autobox unknown autobox::universal unknown B::Hooks::EndOfScope 0.24 B::Hooks::EndOfScope::PP 0.24 B::Hooks::EndOfScope::XS 0.24 B::Hooks::OP::Check 0.22 B::Hooks::OP::Check::Install::Files unknown B::Hooks::OP::PPAddr 0.06 B::Hooks::OP::PPAddr::Install::Files unknown B::Keywords 1.18 B::Utils 0.27 B::Utils::Install::Files unknown B::Utils::OP 0.27 bareword::filehandles 0.006 Bit::Vector 7.4 Bit::Vector::Overload 7.4 Bit::Vector::String 7.4 boolean 0.46 Browser::Open 0.04 Bundle::DBD::mysql 4.046 Bundle::DBI 12.008696 Bundle::Object::InsideOut 4.04 C::StructType unknown C::Type unknown C::Var unknown Cache::BaseCache unknown Cache::BaseCacheTester unknown Cache::Cache 1.08 Cache::CacheMetaData unknown Cache::CacheSizer unknown Cache::CacheTester unknown Cache::CacheUtils unknown Cache::FileBackend unknown Cache::FileCache unknown Cache::LRU 0.04 Cache::MemoryBackend unknown Cache::MemoryCache unknown Cache::NullCache unknown Cache::Object unknown Cache::SharedMemoryBackend unknown Cache::SharedMemoryCache unknown Cache::SizeAwareCache unknown Cache::SizeAwareCacheTester unknown Cache::SizeAwareFileCache unknown Cache::SizeAwareMemoryCache unknown Cache::SizeAwareSharedMemoryCache unknown Capture::Tiny 0.48 Carp::Always 0.13 Carp::Assert 0.21 Carp::Assert::More 1.16 Carp::Clan 6.06 Carp::REPL 0.18 Carton unknown Carton::Builder unknown Carton::CLI unknown Carton::CPANfile unknown Carton::Dependency unknown Carton::Dist unknown Carton::Dist::Core unknown Carton::Environment unknown Carton::Error unknown Carton::Index unknown Carton::Mirror unknown Carton::Package unknown Carton::Packer unknown Carton::Snapshot unknown Carton::Snapshot::Emitter unknown Carton::Snapshot::Parser unknown Carton::Tree unknown Carton::Util unknown Catalyst 5.90118 Catalyst::Action unknown Catalyst::Action::Deserialize 1.21 Catalyst::Action::Deserialize::Callback 1.21 Catalyst::Action::Deserialize::JSON 1.21 Catalyst::Action::Deserialize::JSON::XS 1.21 Catalyst::Action::Deserialize::View 1.21 Catalyst::Action::Deserialize::XML::Simple 1.21 Catalyst::Action::Deserialize::YAML 1.21 Catalyst::Action::DeserializeMultiPart 1.21 Catalyst::Action::RenderView 0.16 Catalyst::Action::REST 1.21 Catalyst::Action::REST::ForBrowsers 1.21 Catalyst::Action::Role::ACL 0.07 Catalyst::Action::Serialize 1.21 Catalyst::Action::Serialize::Callback 1.21 Catalyst::Action::Serialize::JSON 1.21 Catalyst::Action::Serialize::JSON::XS 
1.21 Catalyst::Action::Serialize::JSONP 1.21 Catalyst::Action::Serialize::View 1.21 Catalyst::Action::Serialize::XML::Simple 1.21 Catalyst::Action::Serialize::YAML 1.21 Catalyst::Action::Serialize::YAML::HTML 1.21 Catalyst::Action::SerializeBase 1.21 Catalyst::ActionChain unknown Catalyst::ActionContainer unknown Catalyst::ActionRole::ACL 0.07 Catalyst::ActionRole::ConsumesContent unknown Catalyst::ActionRole::HTTPMethods unknown Catalyst::ActionRole::NeedsLogin unknown Catalyst::ActionRole::QueryMatching unknown Catalyst::ActionRole::Scheme unknown Catalyst::Authentication::Credential::HTTP 1.018 Catalyst::Authentication::Credential::NoPassword unknown Catalyst::Authentication::Credential::Password unknown Catalyst::Authentication::Credential::Remote unknown Catalyst::Authentication::Realm unknown Catalyst::Authentication::Realm::Compatibility unknown Catalyst::Authentication::Realm::Progressive unknown Catalyst::Authentication::Realm::SimpleDB unknown Catalyst::Authentication::Store::DBIx::Class 0.1506 Catalyst::Authentication::Store::DBIx::Class::User unknown Catalyst::Authentication::Store::Minimal unknown Catalyst::Authentication::Store::Null unknown Catalyst::Authentication::User unknown Catalyst::Authentication::User::Hash unknown Catalyst::Base unknown Catalyst::ClassData unknown Catalyst::Component unknown Catalyst::Component::ApplicationAttribute unknown Catalyst::Component::ContextClosure unknown Catalyst::Component::InstancePerContext 0.001001 Catalyst::Controller unknown Catalyst::Controller::ActionRole 0.17 Catalyst::Controller::REST 1.21 Catalyst::Devel 1.39 Catalyst::Dispatcher unknown Catalyst::DispatchType unknown Catalyst::DispatchType::Chained unknown Catalyst::DispatchType::Default unknown Catalyst::DispatchType::Index unknown Catalyst::DispatchType::Path unknown Catalyst::Engine unknown Catalyst::EngineLoader unknown Catalyst::Exception unknown Catalyst::Exception::Basic unknown Catalyst::Exception::Detach unknown Catalyst::Exception::Go unknown Catalyst::Exception::Interface unknown Catalyst::Helper 1.39 Catalyst::Helper::Model::Adaptor unknown Catalyst::Helper::Model::DBIC::Schema 0.65 Catalyst::Helper::Model::Factory unknown Catalyst::Helper::Model::Factory::PerRequest unknown Catalyst::Helper::View::Email 0.36 Catalyst::Helper::View::Email::Template 0.36 Catalyst::Helper::View::TT 0.44 Catalyst::Helper::View::TTSite 0.44 Catalyst::Log unknown Catalyst::Manual 5.9009 Catalyst::Middleware::Stash unknown Catalyst::Model unknown Catalyst::Model::Adaptor 0.10 Catalyst::Model::Adaptor::Base unknown Catalyst::Model::DBIC::Schema 0.65 Catalyst::Model::Factory 0.10 Catalyst::Model::Factory::PerRequest 0.10 Catalyst::Plugin::Authentication 0.10023 Catalyst::Plugin::Authentication::Credential::Password unknown Catalyst::Plugin::Authentication::Store::Minimal unknown Catalyst::Plugin::Authentication::User unknown Catalyst::Plugin::Authentication::User::Hash unknown Catalyst::Plugin::ConfigLoader 0.34 Catalyst::Plugin::I18N 0.10 Catalyst::Plugin::Session 0.40 Catalyst::Plugin::Session::State unknown Catalyst::Plugin::Session::State::Cookie 0.17 Catalyst::Plugin::Session::Store unknown Catalyst::Plugin::Session::Store::DBIC 0.14 Catalyst::Plugin::Session::Store::DBIC::Delegate unknown Catalyst::Plugin::Session::Store::Delegate 0.06 Catalyst::Plugin::Session::Store::Dummy unknown Catalyst::Plugin::Session::Store::File 0.18 Catalyst::Plugin::Session::Test::Store 123 Catalyst::Plugin::StackTrace 0.12 Catalyst::Plugin::Static::Simple 0.36 Catalyst::Plugin::Unicode::Encoding 5.90118 
Catalyst::Request unknown Catalyst::Request::PartData unknown Catalyst::Request::REST 1.21 Catalyst::Request::REST::ForBrowsers 1.21 Catalyst::Request::Upload unknown Catalyst::Response unknown Catalyst::Response::Writer unknown Catalyst::Restarter unknown Catalyst::Restarter::Forking unknown Catalyst::Restarter::Win32 unknown Catalyst::Runtime 5.90118 Catalyst::Script::CGI unknown Catalyst::Script::Create unknown Catalyst::Script::FastCGI unknown Catalyst::Script::Server unknown Catalyst::Script::Test unknown Catalyst::ScriptRole unknown Catalyst::ScriptRunner unknown Catalyst::Stats unknown Catalyst::Test unknown Catalyst::TraitFor::Model::DBIC::Schema::Caching unknown Catalyst::TraitFor::Model::DBIC::Schema::PerRequestSchema unknown Catalyst::TraitFor::Model::DBIC::Schema::Replicated unknown Catalyst::TraitFor::Model::DBIC::Schema::SchemaProxy unknown Catalyst::TraitFor::Request::REST 1.21 Catalyst::TraitFor::Request::REST::ForBrowsers 1.21 Catalyst::Utils unknown Catalyst::View unknown Catalyst::View::Email 0.36 Catalyst::View::Email::Template 0.36 Catalyst::View::TT 0.44 CatalystX::Component::Traits 0.19 CatalystX::InjectComponent 0.025 CatalystX::LeakChecker 0.06 CatalystX::Profile 0.02 CatalystX::Profile::Controller::ControlProfiling 0.02 CatalystX::REPL 0.04 CatalystX::SimpleLogin 0.20 CatalystX::SimpleLogin::Controller::Login unknown CatalystX::SimpleLogin::Form::Login unknown CatalystX::SimpleLogin::Form::LoginOpenID unknown CatalystX::SimpleLogin::TraitFor::Controller::Login::Logout unknown CatalystX::SimpleLogin::TraitFor::Controller::Login::OpenID unknown CatalystX::SimpleLogin::TraitFor::Controller::Login::RenderAsTTTemplate unknown CatalystX::SimpleLogin::TraitFor::Controller::Login::WithRedirect unknown CGI 4.38 CGI::Carp 4.38 CGI::Cookie 4.38 CGI::File::Temp 4.38 CGI::FormBuilder 3.10 CGI::FormBuilder::Field 3.10 CGI::FormBuilder::Field::button 3.10 CGI::FormBuilder::Field::checkbox 3.10 CGI::FormBuilder::Field::date 3.10 CGI::FormBuilder::Field::datetime 3.10 CGI::FormBuilder::Field::datetime_local 3.10 CGI::FormBuilder::Field::email 3.10 CGI::FormBuilder::Field::file 3.10 CGI::FormBuilder::Field::hidden 3.10 CGI::FormBuilder::Field::image 3.10 CGI::FormBuilder::Field::number 3.10 CGI::FormBuilder::Field::password 3.10 CGI::FormBuilder::Field::radio 3.10 CGI::FormBuilder::Field::select 3.10 CGI::FormBuilder::Field::static 3.10 CGI::FormBuilder::Field::submit 3.10 CGI::FormBuilder::Field::text 3.10 CGI::FormBuilder::Field::textarea 3.10 CGI::FormBuilder::Field::time 3.10 CGI::FormBuilder::Field::url 3.10 CGI::FormBuilder::Messages 3.10 CGI::FormBuilder::Messages::base 3.10 CGI::FormBuilder::Messages::default 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 3.10 CGI::FormBuilder::Messages::locale 
3.10 CGI::FormBuilder::Multi 3.10 CGI::FormBuilder::Source 3.10 CGI::FormBuilder::Source::File 3.10 CGI::FormBuilder::Source::Perl 0.01 CGI::FormBuilder::Template 3.10 CGI::FormBuilder::Template::Builtin 3.10 CGI::FormBuilder::Template::CGI_SSI 3.10 CGI::FormBuilder::Template::Div 3.10 CGI::FormBuilder::Template::Fast 3.10 CGI::FormBuilder::Template::HTML 3.10 CGI::FormBuilder::Template::Text 3.10 CGI::FormBuilder::Template::TT2 3.10 CGI::FormBuilder::Test 3.10 CGI::FormBuilder::Util 3.10 CGI::HTML::Functions unknown CGI::Pretty 4.38 CGI::Push 4.38 CGI::Simple 1.15 CGI::Simple::Cookie 1.15 CGI::Simple::Standard 1.15 CGI::Simple::Util 1.15 CGI::Struct 1.21 CGI::Util 4.38 CHI 0.60 CHI::CacheObject 0.60 CHI::Constants 0.60 CHI::Driver 0.60 CHI::Driver::Base::CacheContainer 0.60 CHI::Driver::CacheCache 0.60 CHI::Driver::FastMmap 0.60 CHI::Driver::File 0.60 CHI::Driver::Memory 0.60 CHI::Driver::Metacache 0.60 CHI::Driver::Null 0.60 CHI::Driver::RawMemory 0.60 CHI::Driver::Role::HasSubcaches 0.60 CHI::Driver::Role::IsSizeAware 0.60 CHI::Driver::Role::IsSubcache 0.60 CHI::Driver::Role::Universal 0.60 CHI::Serializer::JSON 0.60 CHI::Serializer::Storable 0.60 CHI::Stats 0.60 CHI::t::Bugs 0.60 CHI::t::Config 0.60 CHI::t::Constants 0.60 CHI::t::Driver 0.60 CHI::t::Driver::CacheCache 0.60 CHI::t::Driver::FastMmap 0.60 CHI::t::Driver::File 0.60 CHI::t::Driver::File::DepthZero 0.60 CHI::t::Driver::Memory 0.60 CHI::t::Driver::NonMoose 0.60 CHI::t::Driver::RawMemory 0.60 CHI::t::Driver::Subcache 0.60 CHI::t::Driver::Subcache::l1_cache 0.60 CHI::t::Driver::Subcache::mirror_cache 0.60 CHI::t::GetError 0.60 CHI::t::Initialize 0.60 CHI::t::Null 0.60 CHI::t::RequiredModules 0.60 CHI::t::Sanity 0.60 CHI::t::SetError 0.60 CHI::t::Subcache 0.60 CHI::t::Subclass 0.60 CHI::t::Util 0.60 CHI::Test 0.60 CHI::Test::Class 0.60 CHI::Test::Driver::NonMoose 0.60 CHI::Test::Driver::Readonly 0.60 CHI::Test::Driver::Role::CheckKeyValidity 0.60 CHI::Test::Driver::Writeonly 0.60 CHI::Test::Util 0.60 CHI::Types 0.60 CHI::Util 0.60 Class::Accessor 0.51 Class::Accessor::Chained 0.01 Class::Accessor::Chained::Fast unknown Class::Accessor::Fast 0.51 Class::Accessor::Faster 0.51 Class::Accessor::Grouped 0.10014 Class::Accessor::Lite 0.08 Class::AutoClass 1.56 Class::AutoClass::Root 1 Class::C3 0.34 Class::C3::Adopt::NEXT 0.14 Class::C3::Componentised 1.001002 Class::C3::Componentised::ApplyHooks unknown Class::Data::Inheritable 0.08 Class::Factory::Util 1.7 Class::Inspector 1.32 Class::Inspector::Functions 1.32 Class::Load 0.25 Class::Load::PP 0.25 Class::Load::XS 0.10 Class::Method::Modifiers 2.12 Class::MethodMaker 2.24 Class::MethodMaker::array unknown Class::MethodMaker::Constants unknown Class::MethodMaker::Engine 2.24 Class::MethodMaker::hash unknown Class::MethodMaker::OptExt unknown Class::MethodMaker::scalar unknown Class::MethodMaker::V1Compat unknown Class::MOP 2.2011 Class::MOP::Attribute 2.2011 Class::MOP::Class 2.2011 Class::MOP::Class::Immutable::Trait 2.2011 Class::MOP::Deprecated 2.2011 Class::MOP::Instance 2.2011 Class::MOP::Method 2.2011 Class::MOP::Method::Accessor 2.2011 Class::MOP::Method::Constructor 2.2011 Class::MOP::Method::Generated 2.2011 Class::MOP::Method::Inlined 2.2011 Class::MOP::Method::Meta 2.2011 Class::MOP::Method::Wrapped 2.2011 Class::MOP::MiniTrait 2.2011 Class::MOP::Mixin 2.2011 Class::MOP::Mixin::AttributeCore 2.2011 Class::MOP::Mixin::HasAttributes 2.2011 Class::MOP::Mixin::HasMethods 2.2011 Class::MOP::Mixin::HasOverloads 2.2011 Class::MOP::Module 2.2011 Class::MOP::Object 2.2011 
Class::MOP::Overload 2.2011 Class::MOP::Package 2.2011 Class::Singleton 1.5 Class::Tiny 1.006 Class::Trigger 0.14 Class::Unload 0.11 Class::XSAccessor 1.19 Class::XSAccessor::Array 1.19 Clipboard 0.13 Clipboard::MacPasteboard unknown Clipboard::Win32 unknown Clipboard::Xclip unknown Clone 0.39 Clone::Choose 0.010 Clone::PP 1.07 Commandable 0.01 Commandable::Invocation 0.01 common::sense 3.74 Compress::Bzip2 2.26 Compress::Raw::Bzip2 2.081 Compress::Raw::Zlib 2.081 Config::Any 0.32 Config::Any::Base unknown Config::Any::General unknown Config::Any::INI unknown Config::Any::JSON unknown Config::Any::Perl unknown Config::Any::XML unknown Config::Any::YAML unknown Config::General 2.63 Config::General::Extended 2.07 Config::General::Interpolated 2.15 Config::INI 0.025 Config::INI::Reader 0.025 Config::INI::Writer 0.025 Config::MVP 2.200011 Config::MVP::Assembler 2.200011 Config::MVP::Assembler::WithBundles 2.200011 Config::MVP::Error 2.200011 Config::MVP::Reader 2.200011 Config::MVP::Reader::Findable 2.200011 Config::MVP::Reader::Findable::ByExtension 2.200011 Config::MVP::Reader::Finder 2.200011 Config::MVP::Reader::Hash 2.200011 Config::MVP::Reader::INI 2.101463 Config::MVP::Section 2.200011 Config::MVP::Sequence 2.200011 Config::Tiny 2.23 constant::boolean 0.02 Context::Preserve 0.03 Contextual::Return 0.004014 Contextual::Return::Failure unknown Convert::Binary::C 0.78 Convert::Binary::C::Cached 0.78 Convert::BinHex 1.125 Convert::Color 0.11 Convert::Color::CMY 0.11 Convert::Color::CMYK 0.11 Convert::Color::HSL 0.11 Convert::Color::HSV 0.11 Convert::Color::RGB 0.11 Convert::Color::RGB16 0.11 Convert::Color::RGB8 0.11 Convert::Color::VGA 0.11 Convert::Color::X11 0.11 Convert::Color::XTerm 0.05 Convert::UU 0.5201 Cookie::Baker 0.09 CPAN::Changes 0.400002 CPAN::Changes::Group unknown CPAN::Changes::Release unknown CPAN::Checksums 2.12 CPAN::Common::Index 0.010 CPAN::Common::Index::LocalPackage 0.010 CPAN::Common::Index::MetaDB 0.010 CPAN::Common::Index::Mirror 0.010 CPAN::Common::Index::Mux::Ordered 0.010 CPAN::DistnameInfo 0.12 CPAN::Meta::Check 0.014 CPAN::Mini 1.111016 CPAN::Mini::App 1.111016 CPAN::Perl::Releases 3.68 CPAN::Uploader 0.103013 Cpanel::JSON::XS 4.04 Cpanel::JSON::XS::Type unknown Crypt::Blowfish 2.14 Crypt::CBC 2.33 Crypt::PasswdMD5 1.40 Crypt::Random::Seed 0.03 Crypt::Random::TESHA2 0.01 Crypt::Random::TESHA2::Config 0.01 Crypt::RC4 2.02 CSS::Tiny 1.20 curry 1.001000 curry::weak unknown Curses::Window 1.36 Cwd 3.74 Cwd::Guard 0.05 Dancer 1.3400 Dancer2 0.206000 Dancer2::CLI 0.206000 Dancer2::CLI::Command::gen 0.206000 Dancer2::CLI::Command::version 0.206000 Dancer2::Core 0.206000 Dancer2::Core::App 0.206000 Dancer2::Core::Cookie 0.206000 Dancer2::Core::Dispatcher 0.206000 Dancer2::Core::DSL 0.206000 Dancer2::Core::Error 0.206000 Dancer2::Core::Factory 0.206000 Dancer2::Core::Hook 0.206000 Dancer2::Core::HTTP 0.206000 Dancer2::Core::MIME 0.206000 Dancer2::Core::Request 0.206000 Dancer2::Core::Request::Upload 0.206000 Dancer2::Core::Response 0.206000 Dancer2::Core::Response::Delayed 0.206000 Dancer2::Core::Role::ConfigReader 0.206000 Dancer2::Core::Role::DSL 0.206000 Dancer2::Core::Role::Engine 0.206000 Dancer2::Core::Role::Handler 0.206000 Dancer2::Core::Role::HasLocation 0.206000 Dancer2::Core::Role::Hookable 0.206000 Dancer2::Core::Role::Logger 0.206000 Dancer2::Core::Role::Serializer 0.206000 Dancer2::Core::Role::SessionFactory 0.206000 Dancer2::Core::Role::SessionFactory::File 0.206000 Dancer2::Core::Role::StandardResponses 0.206000 Dancer2::Core::Role::Template 0.206000 
Dancer2::Core::Route 0.206000 Dancer2::Core::Runner 0.206000 Dancer2::Core::Session 0.206000 Dancer2::Core::Time 0.206000 Dancer2::Core::Types 0.206000 Dancer2::FileUtils 0.206000 Dancer2::Handler::AutoPage 0.206000 Dancer2::Handler::File 0.206000 Dancer2::Logger::Capture 0.206000 Dancer2::Logger::Capture::Trap 0.206000 Dancer2::Logger::Console 0.206000 Dancer2::Logger::Diag 0.206000 Dancer2::Logger::File 0.206000 Dancer2::Logger::LogReport 1.27 Dancer2::Logger::Note 0.206000 Dancer2::Logger::Null 0.206000 Dancer2::Plugin 0.206000 Dancer2::Plugin::LogReport 1.27 Dancer2::Plugin::LogReport::Message 1.27 Dancer2::Serializer::Dumper 0.206000 Dancer2::Serializer::JSON 0.206000 Dancer2::Serializer::Mutable 0.206000 Dancer2::Serializer::YAML 0.206000 Dancer2::Session::Simple 0.206000 Dancer2::Session::YAML 0.206000 Dancer2::Template::Implementation::ForkedTiny 0.206000 Dancer2::Template::Simple 0.206000 Dancer2::Template::TemplateToolkit 0.206000 Dancer2::Template::Tiny 0.206000 Dancer2::Test 0.206000 Dancer::App 1.3400 Dancer::Config 1.3400 Dancer::Config::Object 1.3400 Dancer::Continuation 1.3400 Dancer::Continuation::Halted 1.3400 Dancer::Continuation::Route 1.3400 Dancer::Continuation::Route::ErrorSent 1.3400 Dancer::Continuation::Route::FileSent 1.3400 Dancer::Continuation::Route::Forwarded 1.3400 Dancer::Continuation::Route::Passed 1.3400 Dancer::Continuation::Route::Templated 1.3400 Dancer::Cookie 1.3400 Dancer::Cookies 1.3400 Dancer::Deprecation 1.3400 Dancer::Engine 1.3400 Dancer::Error 1.3400 Dancer::Exception 1.3400 Dancer::Exception::Base 1.3400 Dancer::Factory::Hook 1.3400 Dancer::FileUtils 1.3400 Dancer::GetOpt 1.3400 Dancer::Handler 1.3400 Dancer::Handler::Debug 1.3400 Dancer::Handler::PSGI 1.3400 Dancer::Handler::Standalone 1.3400 Dancer::Hook 1.3400 Dancer::Hook::Properties 1.3400 Dancer::HTTP 1.3400 Dancer::Logger 1.3400 Dancer::Logger::Abstract 1.3400 Dancer::Logger::Capture 1.3400 Dancer::Logger::Capture::Trap 1.3400 Dancer::Logger::Console 1.3400 Dancer::Logger::Diag 1.3400 Dancer::Logger::File 1.3400 Dancer::Logger::LogReport 1.27 Dancer::Logger::Note 1.3400 Dancer::Logger::Null 1.3400 Dancer::MIME 1.3400 Dancer::ModuleLoader 1.3400 Dancer::Object 1.3400 Dancer::Object::Singleton 1.3400 Dancer::Plugin 1.3400 Dancer::Plugin::Ajax 1.3400 Dancer::Renderer 1.3400 Dancer::Request 1.3400 Dancer::Request::Upload 1.3400 Dancer::Response 1.3400 Dancer::Route 1.3400 Dancer::Route::Cache 1.3400 Dancer::Route::Registry 1.3400 Dancer::Serializer 1.3400 Dancer::Serializer::Abstract 1.3400 Dancer::Serializer::Dumper 1.3400 Dancer::Serializer::JSON 1.3400 Dancer::Serializer::JSONP 1.3400 Dancer::Serializer::Mutable 1.3400 Dancer::Serializer::XML 1.3400 Dancer::Serializer::YAML 1.3400 Dancer::Session 1.3400 Dancer::Session::Abstract 1.3400 Dancer::Session::Simple 1.3400 Dancer::Session::YAML 1.3400 Dancer::SharedData 1.3400 Dancer::Template 1.3400 Dancer::Template::Abstract 1.3400 Dancer::Template::Simple 1.3400 Dancer::Template::TemplateToolkit 1.3400 Dancer::Test 1.3400 Dancer::Timer 1.3400 Data::Clone 0.004 Data::Compare 1.25 Data::Compare::Plugins::Scalar::Properties 1 Data::Dump 1.23 Data::Dump::FilterContext unknown Data::Dump::Filtered unknown Data::Dump::Streamer 2.40 Data::Dump::Streamer::_::StringPrinter 0.1 Data::Dump::Trace 0.02 Data::Dumper::Again 0.01 Data::Dumper::Concise 2.023 Data::Dumper::Concise::Sugar 2.023 Data::Dumper::Perltidy 0.03 Data::Grove 0.08 Data::Grove::Parent 0.08 Data::Grove::Visitor 0.08 Data::Munge 0.097 Data::OptList 0.110 Data::Page 2.02 Data::Paginator 
0.08 Data::Paginator::Types 0.08 Data::Perl 0.002009 Data::Perl::Bool 0.002009 Data::Perl::Bool::MooseLike 0.001008 Data::Perl::Code 0.002009 Data::Perl::Collection::Array 0.002009 Data::Perl::Collection::Array::MooseLike 0.001008 Data::Perl::Collection::Hash 0.002009 Data::Perl::Collection::Hash::MooseLike 0.001008 Data::Perl::Counter 0.002009 Data::Perl::Number 0.002009 Data::Perl::Number::MooseLike 0.001008 Data::Perl::Role::Bool 0.002009 Data::Perl::Role::Code 0.002009 Data::Perl::Role::Collection::Array 0.002009 Data::Perl::Role::Collection::Hash 0.002009 Data::Perl::Role::Counter 0.002009 Data::Perl::Role::Number 0.002009 Data::Perl::Role::String 0.002009 Data::Perl::String 0.002009 Data::Perl::String::MooseLike 0.001008 Data::PowerSet 0.05 Data::Printer 0.40 Data::Printer::Filter unknown Data::Printer::Filter::DateTime unknown Data::Printer::Filter::DB unknown Data::Printer::Filter::Digest unknown Data::Section 0.200007 Data::Stag 0.14 Data::Stag::Arr2HTML 0.14 Data::Stag::Base 0.14 Data::Stag::BaseGenerator unknown Data::Stag::ChainHandler 0.14 Data::Stag::DTDWriter 0.14 Data::Stag::GraphHandler 0.14 Data::Stag::HashDB 0.14 Data::Stag::IndentParser 0.14 Data::Stag::IndentWriter 0.14 Data::Stag::ITextParser 0.14 Data::Stag::ITextWriter 0.14 Data::Stag::JSONWriter 0.14 Data::Stag::null 0.14 Data::Stag::PerlWriter 0.14 Data::Stag::PodParser 0.14 Data::Stag::SAX2Stag 0.14 Data::Stag::Simple 0.14 Data::Stag::StagDB 0.14 Data::Stag::StagI unknown Data::Stag::StagImpl 0.14 Data::Stag::SxprParser 0.14 Data::Stag::SxprWriter 0.14 Data::Stag::Util 0.14 Data::Stag::Writer 0.14 Data::Stag::XMLParser 0.14 Data::Stag::XMLWriter 0.14 Data::Stag::XSLHandler unknown Data::Stag::XSLTHandler unknown Data::UUID 1.221 Data::Validate::Domain 0.14 Data::Validate::IP 0.27 Data::Validate::URI 0.07 Data::Visitor 0.30 Data::Visitor::Callback 0.30 Date::Format 2.24 Date::Language 1.10 Date::Language::Afar 0.99 Date::Language::Amharic 1.00 Date::Language::Austrian 1.01 Date::Language::Brazilian 1.01 Date::Language::Bulgarian 1.01 Date::Language::Chinese 1.00 Date::Language::Chinese_GB 1.01 Date::Language::Czech 1.01 Date::Language::Danish 1.01 Date::Language::Dutch 1.02 Date::Language::English 1.01 Date::Language::Finnish 1.01 Date::Language::French 1.04 Date::Language::Gedeo 0.99 Date::Language::German 1.02 Date::Language::Greek 1.00 Date::Language::Hungarian 1.01 Date::Language::Icelandic 1.01 Date::Language::Italian 1.01 Date::Language::Norwegian 1.01 Date::Language::Oromo 0.99 Date::Language::Romanian 1.01 Date::Language::Russian 1.01 Date::Language::Russian_cp1251 1.01 Date::Language::Russian_koi8r 1.01 Date::Language::Sidama 0.99 Date::Language::Somali 0.99 Date::Language::Spanish 1.00 Date::Language::Swedish 1.01 Date::Language::Tigrinya 1.00 Date::Language::TigrinyaEritrean 1.00 Date::Language::TigrinyaEthiopian 1.00 Date::Language::Turkish 1.0 Date::Parse 2.30 Date::Tiny 1.07 DateTime 1.49 DateTime::Astro 1.03 DateTime::Calendar::Chinese 1.00 DateTime::Calendar::Japanese::Era 0.08003 DateTime::Duration 1.49 DateTime::Event::Chinese 1.00 DateTime::Event::ICal 0.13 DateTime::Event::SolarTerm unknown DateTime::Format::Builder 0.81 DateTime::Format::Builder::Parser 0.81 DateTime::Format::Builder::Parser::Dispatch 0.81 DateTime::Format::Builder::Parser::generic 0.81 DateTime::Format::Builder::Parser::Quick 0.81 DateTime::Format::Builder::Parser::Regex 0.81 DateTime::Format::Builder::Parser::Strptime 0.81 DateTime::Format::DateParse 0.05
DateTime::Format::Duration 1.04 DateTime::Format::Epoch 0.16 DateTime::Format::Epoch::ActiveDirectory 0.13 DateTime::Format::Epoch::DotNet 0.13 DateTime::Format::Epoch::JD 0.13 DateTime::Format::Epoch::Lilian 0.13 DateTime::Format::Epoch::MacOS 0.13 DateTime::Format::Epoch::MJD 0.13 DateTime::Format::Epoch::NTP 0.14 DateTime::Format::Epoch::RataDie 0.13 DateTime::Format::Epoch::RJD 0.13 DateTime::Format::Epoch::TAI64 0.13 DateTime::Format::Epoch::TJD 0.13 DateTime::Format::Epoch::Unix 0.13 DateTime::Format::Flexible 0.30 DateTime::Format::Flexible::lang unknown DateTime::Format::Flexible::lang::de unknown DateTime::Format::Flexible::lang::en unknown DateTime::Format::Flexible::lang::es unknown DateTime::Format::ICal 0.09 DateTime::Format::Mail 0.403 DateTime::Format::Natural 1.05 DateTime::Format::Natural::Calc 1.41 DateTime::Format::Natural::Compat 0.07 DateTime::Format::Natural::Duration 0.06 DateTime::Format::Natural::Duration::Checks 0.04 DateTime::Format::Natural::Expand 0.03 DateTime::Format::Natural::Extract 0.11 DateTime::Format::Natural::Formatted 0.07 DateTime::Format::Natural::Helpers 0.06 DateTime::Format::Natural::Lang::Base 1.08 DateTime::Format::Natural::Lang::EN 1.62 DateTime::Format::Natural::Rewrite 0.06 DateTime::Format::Natural::Test 0.10 DateTime::Format::Natural::Utils 0.05 DateTime::Format::Natural::Wrappers 0.03 DateTime::Format::Strptime 1.75 DateTime::Format::Strptime::Types 1.75 DateTime::Format::W3CDTF 0.07 DateTime::Helpers 1.49 DateTime::Infinite 1.49 DateTime::LeapSecond 1.49 DateTime::Locale 1.22 DateTime::Locale::Base 1.22 DateTime::Locale::Catalog 1.22 DateTime::Locale::Data 1.22 DateTime::Locale::FromData 1.22 DateTime::Locale::Util 1.22 DateTime::PP 1.49 DateTime::PPExtra 1.49 DateTime::Set 0.3900 DateTime::Set::ICal 0.19 DateTime::Span unknown DateTime::SpanSet unknown DateTime::TimeZone 2.19 DateTime::TimeZone::Africa::Abidjan 2.19 DateTime::TimeZone::Africa::Accra 2.19 DateTime::TimeZone::Africa::Algiers 2.19 DateTime::TimeZone::Africa::Bissau 2.19 DateTime::TimeZone::Africa::Cairo 2.19 DateTime::TimeZone::Africa::Casablanca 2.19 DateTime::TimeZone::Africa::Ceuta 2.19 DateTime::TimeZone::Africa::El_Aaiun 2.19 DateTime::TimeZone::Africa::Johannesburg 2.19 DateTime::TimeZone::Africa::Juba 2.19 DateTime::TimeZone::Africa::Khartoum 2.19 DateTime::TimeZone::Africa::Lagos 2.19 DateTime::TimeZone::Africa::Maputo 2.19 DateTime::TimeZone::Africa::Monrovia 2.19 DateTime::TimeZone::Africa::Nairobi 2.19 DateTime::TimeZone::Africa::Ndjamena 2.19 DateTime::TimeZone::Africa::Sao_Tome 2.19 DateTime::TimeZone::Africa::Tripoli 2.19 DateTime::TimeZone::Africa::Tunis 2.19 DateTime::TimeZone::Africa::Windhoek 2.19 DateTime::TimeZone::America::Adak 2.19 DateTime::TimeZone::America::Anchorage 2.19 DateTime::TimeZone::America::Araguaina 2.19 DateTime::TimeZone::America::Argentina::Buenos_Aires 2.19 DateTime::TimeZone::America::Argentina::Catamarca 2.19 DateTime::TimeZone::America::Argentina::Cordoba 2.19 DateTime::TimeZone::America::Argentina::Jujuy 2.19 DateTime::TimeZone::America::Argentina::La_Rioja 2.19 DateTime::TimeZone::America::Argentina::Mendoza 2.19 DateTime::TimeZone::America::Argentina::Rio_Gallegos 2.19 DateTime::TimeZone::America::Argentina::Salta 2.19 DateTime::TimeZone::America::Argentina::San_Juan 2.19 DateTime::TimeZone::America::Argentina::San_Luis 2.19 DateTime::TimeZone::America::Argentina::Tucuman 2.19 DateTime::TimeZone::America::Argentina::Ushuaia 2.19 DateTime::TimeZone::America::Asuncion 2.19 DateTime::TimeZone::America::Atikokan 2.19 
DateTime::TimeZone::America::Bahia 2.19 DateTime::TimeZone::America::Bahia_Banderas 2.19 DateTime::TimeZone::America::Barbados 2.19 DateTime::TimeZone::America::Belem 2.19 DateTime::TimeZone::America::Belize 2.19 DateTime::TimeZone::America::Blanc_Sablon 2.19 DateTime::TimeZone::America::Boa_Vista 2.19 DateTime::TimeZone::America::Bogota 2.19 DateTime::TimeZone::America::Boise 2.19 DateTime::TimeZone::America::Cambridge_Bay 2.19 DateTime::TimeZone::America::Campo_Grande 2.19 DateTime::TimeZone::America::Cancun 2.19 DateTime::TimeZone::America::Caracas 2.19 DateTime::TimeZone::America::Cayenne 2.19 DateTime::TimeZone::America::Chicago 2.19 DateTime::TimeZone::America::Chihuahua 2.19 DateTime::TimeZone::America::Costa_Rica 2.19 DateTime::TimeZone::America::Creston 2.19 DateTime::TimeZone::America::Cuiaba 2.19 DateTime::TimeZone::America::Curacao 2.19 DateTime::TimeZone::America::Danmarkshavn 2.19 DateTime::TimeZone::America::Dawson 2.19 DateTime::TimeZone::America::Dawson_Creek 2.19 DateTime::TimeZone::America::Denver 2.19 DateTime::TimeZone::America::Detroit 2.19 DateTime::TimeZone::America::Edmonton 2.19 DateTime::TimeZone::America::Eirunepe 2.19 DateTime::TimeZone::America::El_Salvador 2.19 DateTime::TimeZone::America::Fort_Nelson 2.19 DateTime::TimeZone::America::Fortaleza 2.19 DateTime::TimeZone::America::Glace_Bay 2.19 DateTime::TimeZone::America::Godthab 2.19 DateTime::TimeZone::America::Goose_Bay 2.19 DateTime::TimeZone::America::Grand_Turk 2.19 DateTime::TimeZone::America::Guatemala 2.19 DateTime::TimeZone::America::Guayaquil 2.19 DateTime::TimeZone::America::Guyana 2.19 DateTime::TimeZone::America::Halifax 2.19 DateTime::TimeZone::America::Havana 2.19 DateTime::TimeZone::America::Hermosillo 2.19 DateTime::TimeZone::America::Indiana::Indianapolis 2.19 DateTime::TimeZone::America::Indiana::Knox 2.19 DateTime::TimeZone::America::Indiana::Marengo 2.19 DateTime::TimeZone::America::Indiana::Petersburg 2.19 DateTime::TimeZone::America::Indiana::Tell_City 2.19 DateTime::TimeZone::America::Indiana::Vevay 2.19 DateTime::TimeZone::America::Indiana::Vincennes 2.19 DateTime::TimeZone::America::Indiana::Winamac 2.19 DateTime::TimeZone::America::Inuvik 2.19 DateTime::TimeZone::America::Iqaluit 2.19 DateTime::TimeZone::America::Jamaica 2.19 DateTime::TimeZone::America::Juneau 2.19 DateTime::TimeZone::America::Kentucky::Louisville 2.19 DateTime::TimeZone::America::Kentucky::Monticello 2.19 DateTime::TimeZone::America::La_Paz 2.19 DateTime::TimeZone::America::Lima 2.19 DateTime::TimeZone::America::Los_Angeles 2.19 DateTime::TimeZone::America::Maceio 2.19 DateTime::TimeZone::America::Managua 2.19 DateTime::TimeZone::America::Manaus 2.19 DateTime::TimeZone::America::Martinique 2.19 DateTime::TimeZone::America::Matamoros 2.19 DateTime::TimeZone::America::Mazatlan 2.19 DateTime::TimeZone::America::Menominee 2.19 DateTime::TimeZone::America::Merida 2.19 DateTime::TimeZone::America::Metlakatla 2.19 DateTime::TimeZone::America::Mexico_City 2.19 DateTime::TimeZone::America::Miquelon 2.19 DateTime::TimeZone::America::Moncton 2.19 DateTime::TimeZone::America::Monterrey 2.19 DateTime::TimeZone::America::Montevideo 2.19 DateTime::TimeZone::America::Nassau 2.19 DateTime::TimeZone::America::New_York 2.19 DateTime::TimeZone::America::Nipigon 2.19 DateTime::TimeZone::America::Nome 2.19 DateTime::TimeZone::America::Noronha 2.19 DateTime::TimeZone::America::North_Dakota::Beulah 2.19 DateTime::TimeZone::America::North_Dakota::Center 2.19 DateTime::TimeZone::America::North_Dakota::New_Salem 2.19 
DateTime::TimeZone::America::Ojinaga 2.19 DateTime::TimeZone::America::Panama 2.19 DateTime::TimeZone::America::Pangnirtung 2.19 DateTime::TimeZone::America::Paramaribo 2.19 DateTime::TimeZone::America::Phoenix 2.19 DateTime::TimeZone::America::Port_au_Prince 2.19 DateTime::TimeZone::America::Port_of_Spain 2.19 DateTime::TimeZone::America::Porto_Velho 2.19 DateTime::TimeZone::America::Puerto_Rico 2.19 DateTime::TimeZone::America::Punta_Arenas 2.19 DateTime::TimeZone::America::Rainy_River 2.19 DateTime::TimeZone::America::Rankin_Inlet 2.19 DateTime::TimeZone::America::Recife 2.19 DateTime::TimeZone::America::Regina 2.19 DateTime::TimeZone::America::Resolute 2.19 DateTime::TimeZone::America::Rio_Branco 2.19 DateTime::TimeZone::America::Santarem 2.19 DateTime::TimeZone::America::Santiago 2.19 DateTime::TimeZone::America::Santo_Domingo 2.19 DateTime::TimeZone::America::Sao_Paulo 2.19 DateTime::TimeZone::America::Scoresbysund 2.19 DateTime::TimeZone::America::Sitka 2.19 DateTime::TimeZone::America::St_Johns 2.19 DateTime::TimeZone::America::Swift_Current 2.19 DateTime::TimeZone::America::Tegucigalpa 2.19 DateTime::TimeZone::America::Thule 2.19 DateTime::TimeZone::America::Thunder_Bay 2.19 DateTime::TimeZone::America::Tijuana 2.19 DateTime::TimeZone::America::Toronto 2.19 DateTime::TimeZone::America::Vancouver 2.19 DateTime::TimeZone::America::Whitehorse 2.19 DateTime::TimeZone::America::Winnipeg 2.19 DateTime::TimeZone::America::Yakutat 2.19 DateTime::TimeZone::America::Yellowknife 2.19 DateTime::TimeZone::Antarctica::Casey 2.19 DateTime::TimeZone::Antarctica::Davis 2.19 DateTime::TimeZone::Antarctica::DumontDUrville 2.19 DateTime::TimeZone::Antarctica::Macquarie 2.19 DateTime::TimeZone::Antarctica::Mawson 2.19 DateTime::TimeZone::Antarctica::Palmer 2.19 DateTime::TimeZone::Antarctica::Rothera 2.19 DateTime::TimeZone::Antarctica::Syowa 2.19 DateTime::TimeZone::Antarctica::Troll 2.19 DateTime::TimeZone::Antarctica::Vostok 2.19 DateTime::TimeZone::Asia::Almaty 2.19 DateTime::TimeZone::Asia::Amman 2.19 DateTime::TimeZone::Asia::Anadyr 2.19 DateTime::TimeZone::Asia::Aqtau 2.19 DateTime::TimeZone::Asia::Aqtobe 2.19 DateTime::TimeZone::Asia::Ashgabat 2.19 DateTime::TimeZone::Asia::Atyrau 2.19 DateTime::TimeZone::Asia::Baghdad 2.19 DateTime::TimeZone::Asia::Baku 2.19 DateTime::TimeZone::Asia::Bangkok 2.19 DateTime::TimeZone::Asia::Barnaul 2.19 DateTime::TimeZone::Asia::Beirut 2.19 DateTime::TimeZone::Asia::Bishkek 2.19 DateTime::TimeZone::Asia::Brunei 2.19 DateTime::TimeZone::Asia::Chita 2.19 DateTime::TimeZone::Asia::Choibalsan 2.19 DateTime::TimeZone::Asia::Colombo 2.19 DateTime::TimeZone::Asia::Damascus 2.19 DateTime::TimeZone::Asia::Dhaka 2.19 DateTime::TimeZone::Asia::Dili 2.19 DateTime::TimeZone::Asia::Dubai 2.19 DateTime::TimeZone::Asia::Dushanbe 2.19 DateTime::TimeZone::Asia::Famagusta 2.19 DateTime::TimeZone::Asia::Gaza 2.19 DateTime::TimeZone::Asia::Hebron 2.19 DateTime::TimeZone::Asia::Ho_Chi_Minh 2.19 DateTime::TimeZone::Asia::Hong_Kong 2.19 DateTime::TimeZone::Asia::Hovd 2.19 DateTime::TimeZone::Asia::Irkutsk 2.19 DateTime::TimeZone::Asia::Jakarta 2.19 DateTime::TimeZone::Asia::Jayapura 2.19 DateTime::TimeZone::Asia::Jerusalem 2.19 DateTime::TimeZone::Asia::Kabul 2.19 DateTime::TimeZone::Asia::Kamchatka 2.19 DateTime::TimeZone::Asia::Karachi 2.19 DateTime::TimeZone::Asia::Kathmandu 2.19 DateTime::TimeZone::Asia::Khandyga 2.19 DateTime::TimeZone::Asia::Kolkata 2.19 DateTime::TimeZone::Asia::Krasnoyarsk 2.19 DateTime::TimeZone::Asia::Kuala_Lumpur 2.19 DateTime::TimeZone::Asia::Kuching 2.19 
DateTime::TimeZone::Asia::Macau 2.19 DateTime::TimeZone::Asia::Magadan 2.19 DateTime::TimeZone::Asia::Makassar 2.19 DateTime::TimeZone::Asia::Manila 2.19 DateTime::TimeZone::Asia::Nicosia 2.19 DateTime::TimeZone::Asia::Novokuznetsk 2.19 DateTime::TimeZone::Asia::Novosibirsk 2.19 DateTime::TimeZone::Asia::Omsk 2.19 DateTime::TimeZone::Asia::Oral 2.19 DateTime::TimeZone::Asia::Pontianak 2.19 DateTime::TimeZone::Asia::Pyongyang 2.19 DateTime::TimeZone::Asia::Qatar 2.19 DateTime::TimeZone::Asia::Qyzylorda 2.19 DateTime::TimeZone::Asia::Riyadh 2.19 DateTime::TimeZone::Asia::Sakhalin 2.19 DateTime::TimeZone::Asia::Samarkand 2.19 DateTime::TimeZone::Asia::Seoul 2.19 DateTime::TimeZone::Asia::Shanghai 2.19 DateTime::TimeZone::Asia::Singapore 2.19 DateTime::TimeZone::Asia::Srednekolymsk 2.19 DateTime::TimeZone::Asia::Taipei 2.19 DateTime::TimeZone::Asia::Tashkent 2.19 DateTime::TimeZone::Asia::Tbilisi 2.19 DateTime::TimeZone::Asia::Tehran 2.19 DateTime::TimeZone::Asia::Thimphu 2.19 DateTime::TimeZone::Asia::Tokyo 2.19 DateTime::TimeZone::Asia::Tomsk 2.19 DateTime::TimeZone::Asia::Ulaanbaatar 2.19 DateTime::TimeZone::Asia::Urumqi 2.19 DateTime::TimeZone::Asia::Ust_Nera 2.19 DateTime::TimeZone::Asia::Vladivostok 2.19 DateTime::TimeZone::Asia::Yakutsk 2.19 DateTime::TimeZone::Asia::Yangon 2.19 DateTime::TimeZone::Asia::Yekaterinburg 2.19 DateTime::TimeZone::Asia::Yerevan 2.19 DateTime::TimeZone::Atlantic::Azores 2.19 DateTime::TimeZone::Atlantic::Bermuda 2.19 DateTime::TimeZone::Atlantic::Canary 2.19 DateTime::TimeZone::Atlantic::Cape_Verde 2.19 DateTime::TimeZone::Atlantic::Faroe 2.19 DateTime::TimeZone::Atlantic::Madeira 2.19 DateTime::TimeZone::Atlantic::Reykjavik 2.19 DateTime::TimeZone::Atlantic::South_Georgia 2.19 DateTime::TimeZone::Atlantic::Stanley 2.19 DateTime::TimeZone::Australia::Adelaide 2.19 DateTime::TimeZone::Australia::Brisbane 2.19 DateTime::TimeZone::Australia::Broken_Hill 2.19 DateTime::TimeZone::Australia::Currie 2.19 DateTime::TimeZone::Australia::Darwin 2.19 DateTime::TimeZone::Australia::Eucla 2.19 DateTime::TimeZone::Australia::Hobart 2.19 DateTime::TimeZone::Australia::Lindeman 2.19 DateTime::TimeZone::Australia::Lord_Howe 2.19 DateTime::TimeZone::Australia::Melbourne 2.19 DateTime::TimeZone::Australia::Perth 2.19 DateTime::TimeZone::Australia::Sydney 2.19 DateTime::TimeZone::Catalog 2.19 DateTime::TimeZone::CET 2.19 DateTime::TimeZone::CST6CDT 2.19 DateTime::TimeZone::EET 2.19 DateTime::TimeZone::EST 2.19 DateTime::TimeZone::EST5EDT 2.19 DateTime::TimeZone::Europe::Amsterdam 2.19 DateTime::TimeZone::Europe::Andorra 2.19 DateTime::TimeZone::Europe::Astrakhan 2.19 DateTime::TimeZone::Europe::Athens 2.19 DateTime::TimeZone::Europe::Belgrade 2.19 DateTime::TimeZone::Europe::Berlin 2.19 DateTime::TimeZone::Europe::Brussels 2.19 DateTime::TimeZone::Europe::Bucharest 2.19 DateTime::TimeZone::Europe::Budapest 2.19 DateTime::TimeZone::Europe::Chisinau 2.19 DateTime::TimeZone::Europe::Copenhagen 2.19 DateTime::TimeZone::Europe::Dublin 2.19 DateTime::TimeZone::Europe::Gibraltar 2.19 DateTime::TimeZone::Europe::Helsinki 2.19 DateTime::TimeZone::Europe::Istanbul 2.19 DateTime::TimeZone::Europe::Kaliningrad 2.19 DateTime::TimeZone::Europe::Kiev 2.19 DateTime::TimeZone::Europe::Kirov 2.19 DateTime::TimeZone::Europe::Lisbon 2.19 DateTime::TimeZone::Europe::London 2.19 DateTime::TimeZone::Europe::Luxembourg 2.19 DateTime::TimeZone::Europe::Madrid 2.19 DateTime::TimeZone::Europe::Malta 2.19 DateTime::TimeZone::Europe::Minsk 2.19 DateTime::TimeZone::Europe::Monaco 2.19 
DateTime::TimeZone::Europe::Moscow 2.19 DateTime::TimeZone::Europe::Oslo 2.19 DateTime::TimeZone::Europe::Paris 2.19 DateTime::TimeZone::Europe::Prague 2.19 DateTime::TimeZone::Europe::Riga 2.19 DateTime::TimeZone::Europe::Rome 2.19 DateTime::TimeZone::Europe::Samara 2.19 DateTime::TimeZone::Europe::Saratov 2.19 DateTime::TimeZone::Europe::Simferopol 2.19 DateTime::TimeZone::Europe::Sofia 2.19 DateTime::TimeZone::Europe::Stockholm 2.19 DateTime::TimeZone::Europe::Tallinn 2.19 DateTime::TimeZone::Europe::Tirane 2.19 DateTime::TimeZone::Europe::Ulyanovsk 2.19 DateTime::TimeZone::Europe::Uzhgorod 2.19 DateTime::TimeZone::Europe::Vienna 2.19 DateTime::TimeZone::Europe::Vilnius 2.19 DateTime::TimeZone::Europe::Volgograd 2.19 DateTime::TimeZone::Europe::Warsaw 2.19 DateTime::TimeZone::Europe::Zaporozhye 2.19 DateTime::TimeZone::Europe::Zurich 2.19 DateTime::TimeZone::Floating 2.19 DateTime::TimeZone::HST 2.19 DateTime::TimeZone::Indian::Chagos 2.19 DateTime::TimeZone::Indian::Christmas 2.19 DateTime::TimeZone::Indian::Cocos 2.19 DateTime::TimeZone::Indian::Kerguelen 2.19 DateTime::TimeZone::Indian::Mahe 2.19 DateTime::TimeZone::Indian::Maldives 2.19 DateTime::TimeZone::Indian::Mauritius 2.19 DateTime::TimeZone::Indian::Reunion 2.19 DateTime::TimeZone::Local 2.19 DateTime::TimeZone::Local::Android 2.19 DateTime::TimeZone::Local::Unix 2.19 DateTime::TimeZone::Local::VMS 2.19 DateTime::TimeZone::MET 2.19 DateTime::TimeZone::MST 2.19 DateTime::TimeZone::MST7MDT 2.19 DateTime::TimeZone::OffsetOnly 2.19 DateTime::TimeZone::OlsonDB 2.19 DateTime::TimeZone::OlsonDB::Change 2.19 DateTime::TimeZone::OlsonDB::Observance 2.19 DateTime::TimeZone::OlsonDB::Rule 2.19 DateTime::TimeZone::OlsonDB::Zone 2.19 DateTime::TimeZone::Pacific::Apia 2.19 DateTime::TimeZone::Pacific::Auckland 2.19 DateTime::TimeZone::Pacific::Bougainville 2.19 DateTime::TimeZone::Pacific::Chatham 2.19 DateTime::TimeZone::Pacific::Chuuk 2.19 DateTime::TimeZone::Pacific::Easter 2.19 DateTime::TimeZone::Pacific::Efate 2.19 DateTime::TimeZone::Pacific::Enderbury 2.19 DateTime::TimeZone::Pacific::Fakaofo 2.19 DateTime::TimeZone::Pacific::Fiji 2.19 DateTime::TimeZone::Pacific::Funafuti 2.19 DateTime::TimeZone::Pacific::Galapagos 2.19 DateTime::TimeZone::Pacific::Gambier 2.19 DateTime::TimeZone::Pacific::Guadalcanal 2.19 DateTime::TimeZone::Pacific::Guam 2.19 DateTime::TimeZone::Pacific::Honolulu 2.19 DateTime::TimeZone::Pacific::Kiritimati 2.19 DateTime::TimeZone::Pacific::Kosrae 2.19 DateTime::TimeZone::Pacific::Kwajalein 2.19 DateTime::TimeZone::Pacific::Majuro 2.19 DateTime::TimeZone::Pacific::Marquesas 2.19 DateTime::TimeZone::Pacific::Nauru 2.19 DateTime::TimeZone::Pacific::Niue 2.19 DateTime::TimeZone::Pacific::Norfolk 2.19 DateTime::TimeZone::Pacific::Noumea 2.19 DateTime::TimeZone::Pacific::Pago_Pago 2.19 DateTime::TimeZone::Pacific::Palau 2.19 DateTime::TimeZone::Pacific::Pitcairn 2.19 DateTime::TimeZone::Pacific::Pohnpei 2.19 DateTime::TimeZone::Pacific::Port_Moresby 2.19 DateTime::TimeZone::Pacific::Rarotonga 2.19 DateTime::TimeZone::Pacific::Tahiti 2.19 DateTime::TimeZone::Pacific::Tarawa 2.19 DateTime::TimeZone::Pacific::Tongatapu 2.19 DateTime::TimeZone::Pacific::Wake 2.19 DateTime::TimeZone::Pacific::Wallis 2.19 DateTime::TimeZone::PST8PDT 2.19 DateTime::TimeZone::UTC 2.19 DateTime::TimeZone::WET 2.19 DateTime::Tiny 1.07 DateTime::Types 1.49 DateTimeX::Easy 0.089 DB unknown DBD::DBM 0.08 DBD::ExampleP 12.014311 DBD::File 0.44 DBD::Gofer 0.015327 DBD::Gofer::Policy::Base 0.010088 DBD::Gofer::Policy::classic 0.010088 
DBD::Gofer::Policy::pedantic 0.010088 DBD::Gofer::Policy::rush 0.010088 DBD::Gofer::Transport::Base 0.014121 DBD::Gofer::Transport::corostream unknown DBD::Gofer::Transport::null 0.010088 DBD::Gofer::Transport::pipeone 0.010088 DBD::Gofer::Transport::stream 0.014599 DBD::Mem 0.001 DBD::mysql 4.046 DBD::mysql::GetInfo unknown DBD::NullP 12.014715 DBD::Proxy 0.2004 DBD::Sponge 12.010003 DBD::SQLite 1.58 DBD::SQLite::Constants unknown DBD::SQLite::VirtualTable 1.58 DBD::SQLite::VirtualTable::FileContent unknown DBD::SQLite::VirtualTable::PerlData unknown DBI unknown DBI::Const::GetInfo::ANSI 2.008697 DBI::Const::GetInfo::ODBC 2.011374 DBI::Const::GetInfoReturn 2.008697 DBI::Const::GetInfoType 2.008697 DBI::DBD 12.015129 DBI::DBD::Metadata 2.014214 DBI::DBD::SqlEngine 0.06 DBI::FAQ 1.014935 DBI::Gofer::Execute 0.014283 DBI::Gofer::Request 0.012537 DBI::Gofer::Response 0.011566 DBI::Gofer::Serializer::Base 0.009950 DBI::Gofer::Serializer::DataDumper 0.009950 DBI::Gofer::Serializer::Storable 0.015586 DBI::Gofer::Transport::Base 0.012537 DBI::Gofer::Transport::pipeone 0.012537 DBI::Gofer::Transport::stream 0.012537 DBI::Profile 2.015065 DBI::ProfileData 2.010008 DBI::ProfileDumper 2.015325 DBI::ProfileDumper::Apache 2.014121 DBI::ProfileSubs 0.009396 DBI::ProxyServer 0.3005 DBI::SQL::Nano 1.015544 DBI::Util::_accessor 0.009479 DBI::Util::CacheMemory 0.010315 DBIx::Class 0.082841 DBIx::Class::AccessorGroup unknown DBIx::Class::Admin unknown DBIx::Class::CDBICompat unknown DBIx::Class::CDBICompat::Iterator unknown DBIx::Class::CDBICompat::SQLTransformer unknown DBIx::Class::CDBICompat::Tied::ColumnValue unknown DBIx::Class::Core unknown DBIx::Class::Cursor unknown DBIx::Class::Cursor::Cached 1.001004 DBIx::Class::DB unknown DBIx::Class::Exception unknown DBIx::Class::FilterColumn unknown DBIx::Class::InflateColumn unknown DBIx::Class::InflateColumn::DateTime unknown DBIx::Class::InflateColumn::File unknown DBIx::Class::Optional::Dependencies unknown DBIx::Class::Ordered unknown DBIx::Class::PK unknown DBIx::Class::PK::Auto unknown DBIx::Class::Relationship unknown DBIx::Class::Relationship::Base unknown DBIx::Class::ResultClass::HashRefInflator unknown DBIx::Class::ResultSet unknown DBIx::Class::ResultSetColumn unknown DBIx::Class::ResultSetManager unknown DBIx::Class::ResultSource unknown DBIx::Class::ResultSource::Table unknown DBIx::Class::ResultSource::View unknown DBIx::Class::ResultSourceHandle unknown DBIx::Class::ResultSourceProxy::Table unknown DBIx::Class::Row unknown DBIx::Class::Schema unknown DBIx::Class::Schema::Loader 0.07049 DBIx::Class::Schema::Loader::Base 0.07049 DBIx::Class::Schema::Loader::Column unknown DBIx::Class::Schema::Loader::DBI 0.07049 DBIx::Class::Schema::Loader::DBI::ADO 0.07049 DBIx::Class::Schema::Loader::DBI::ADO::Microsoft_SQL_Server 0.07049 DBIx::Class::Schema::Loader::DBI::ADO::MS_Jet 0.07049 DBIx::Class::Schema::Loader::DBI::Component::QuotedDefault 0.07049 DBIx::Class::Schema::Loader::DBI::DB2 0.07049 DBIx::Class::Schema::Loader::DBI::Firebird 0.07049 DBIx::Class::Schema::Loader::DBI::Informix 0.07049 DBIx::Class::Schema::Loader::DBI::InterBase 0.07049 DBIx::Class::Schema::Loader::DBI::MSSQL 0.07049 DBIx::Class::Schema::Loader::DBI::mysql 0.07049 DBIx::Class::Schema::Loader::DBI::ODBC 0.07049 DBIx::Class::Schema::Loader::DBI::ODBC::ACCESS 0.07049 DBIx::Class::Schema::Loader::DBI::ODBC::Firebird 0.07049 DBIx::Class::Schema::Loader::DBI::ODBC::Microsoft_SQL_Server 0.07049 DBIx::Class::Schema::Loader::DBI::ODBC::SQL_Anywhere 0.07049 
DBIx::Class::Schema::Loader::DBI::Oracle 0.07049 DBIx::Class::Schema::Loader::DBI::Pg 0.07049 DBIx::Class::Schema::Loader::DBI::SQLAnywhere 0.07049 DBIx::Class::Schema::Loader::DBI::SQLite 0.07049 DBIx::Class::Schema::Loader::DBI::Sybase 0.07049 DBIx::Class::Schema::Loader::DBI::Sybase::Common 0.07049 DBIx::Class::Schema::Loader::DBI::Sybase::Microsoft_SQL_Server 0.07049 DBIx::Class::Schema::Loader::DBI::Writing 0.07049 DBIx::Class::Schema::Loader::DBObject unknown DBIx::Class::Schema::Loader::DBObject::Informix unknown DBIx::Class::Schema::Loader::DBObject::Sybase unknown DBIx::Class::Schema::Loader::Optional::Dependencies unknown DBIx::Class::Schema::Loader::RelBuilder 0.07049 DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_040 0.07049 DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_05 0.07049 DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_06 0.07049 DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_07 0.07049 DBIx::Class::Schema::Loader::Table unknown DBIx::Class::Schema::Loader::Table::Informix unknown DBIx::Class::Schema::Loader::Table::Sybase unknown DBIx::Class::Serialize::Storable unknown DBIx::Class::SQLMaker unknown DBIx::Class::SQLMaker::LimitDialects unknown DBIx::Class::SQLMaker::OracleJoins unknown DBIx::Class::StartupCheck unknown DBIx::Class::Storage unknown DBIx::Class::Storage::DBI unknown DBIx::Class::Storage::DBI::ACCESS unknown DBIx::Class::Storage::DBI::ADO unknown DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server unknown DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server::Cursor unknown DBIx::Class::Storage::DBI::ADO::MS_Jet unknown DBIx::Class::Storage::DBI::ADO::MS_Jet::Cursor unknown DBIx::Class::Storage::DBI::AutoCast unknown DBIx::Class::Storage::DBI::Cursor unknown DBIx::Class::Storage::DBI::DB2 unknown DBIx::Class::Storage::DBI::Firebird unknown DBIx::Class::Storage::DBI::Firebird::Common unknown DBIx::Class::Storage::DBI::IdentityInsert unknown DBIx::Class::Storage::DBI::Informix unknown DBIx::Class::Storage::DBI::InterBase unknown DBIx::Class::Storage::DBI::MSSQL unknown DBIx::Class::Storage::DBI::mysql unknown DBIx::Class::Storage::DBI::NoBindVars unknown DBIx::Class::Storage::DBI::ODBC unknown DBIx::Class::Storage::DBI::ODBC::ACCESS unknown DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL unknown DBIx::Class::Storage::DBI::ODBC::Firebird unknown DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server unknown DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere unknown DBIx::Class::Storage::DBI::Oracle unknown DBIx::Class::Storage::DBI::Oracle::Generic unknown DBIx::Class::Storage::DBI::Oracle::WhereJoins unknown DBIx::Class::Storage::DBI::Pg unknown DBIx::Class::Storage::DBI::Replicated unknown DBIx::Class::Storage::DBI::Replicated::Balancer unknown DBIx::Class::Storage::DBI::Replicated::Balancer::First unknown DBIx::Class::Storage::DBI::Replicated::Balancer::Random unknown DBIx::Class::Storage::DBI::Replicated::Pool unknown DBIx::Class::Storage::DBI::Replicated::Replicant unknown DBIx::Class::Storage::DBI::Replicated::WithDSN unknown DBIx::Class::Storage::DBI::SQLAnywhere unknown DBIx::Class::Storage::DBI::SQLAnywhere::Cursor unknown DBIx::Class::Storage::DBI::SQLite unknown DBIx::Class::Storage::DBI::Sybase unknown DBIx::Class::Storage::DBI::Sybase::ASE unknown DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars unknown DBIx::Class::Storage::DBI::Sybase::FreeTDS unknown DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server unknown DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars unknown DBIx::Class::Storage::DBI::Sybase::MSSQL 
unknown DBIx::Class::Storage::DBI::UniqueIdentifier unknown DBIx::Class::Storage::Debug::PrettyPrint unknown DBIx::Class::Storage::Statistics unknown DBIx::Class::Storage::TxnScopeGuard unknown DBIx::Class::UTF8Columns unknown DBIx::Connector 0.56 DBIx::Connector::Driver 0.56 DBIx::Connector::Driver::Firebird 0.56 DBIx::Connector::Driver::MSSQL 0.56 DBIx::Connector::Driver::mysql 0.56 DBIx::Connector::Driver::Oracle 0.56 DBIx::Connector::Driver::Pg 0.56 DBIx::Connector::Driver::SQLite 0.56 DDP unknown Declare::Constraints::Simple 0.03 Declare::Constraints::Simple::Library unknown Declare::Constraints::Simple::Library::Array unknown Declare::Constraints::Simple::Library::Base unknown Declare::Constraints::Simple::Library::Exportable unknown Declare::Constraints::Simple::Library::General unknown Declare::Constraints::Simple::Library::Hash unknown Declare::Constraints::Simple::Library::Numerical unknown Declare::Constraints::Simple::Library::OO unknown Declare::Constraints::Simple::Library::Operators unknown Declare::Constraints::Simple::Library::Referencial unknown Declare::Constraints::Simple::Library::Scalar unknown Declare::Constraints::Simple::Result unknown Devel::AssertC99 unknown Devel::AssertOS 1.21 Devel::AssertOS::AIX 1.2 Devel::AssertOS::Amiga 1.2 Devel::AssertOS::Android 1.2 Devel::AssertOS::Apple 1.3 Devel::AssertOS::BeOS 1.4 Devel::AssertOS::Bitrig 1.0 Devel::AssertOS::BSDOS 1.2 Devel::AssertOS::Cygwin 1.3 Devel::AssertOS::DEC 1.4 Devel::AssertOS::DGUX 1.2 Devel::AssertOS::DragonflyBSD 1.2 Devel::AssertOS::Dynix 1.2 Devel::AssertOS::EBCDIC 1.0 Devel::AssertOS::FreeBSD 1.2 Devel::AssertOS::GNUkFreeBSD 1.1 Devel::AssertOS::Haiku 1.1 Devel::AssertOS::HPUX 1.2 Devel::AssertOS::Hurd 1.0 Devel::AssertOS::Interix 1.2 Devel::AssertOS::iOS 1.0 Devel::AssertOS::Irix 1.2 Devel::AssertOS::Linux 1.3 Devel::AssertOS::Linux::Debian 1.0 Devel::AssertOS::Linux::v2_6 1.3 Devel::AssertOS::MachTen 1.2 Devel::AssertOS::MacOSclassic 1.2 Devel::AssertOS::MacOSX 1.2 Devel::AssertOS::MacOSX::v10_0 1.0 Devel::AssertOS::MacOSX::v10_1 1.0 Devel::AssertOS::MacOSX::v10_10 1.0 Devel::AssertOS::MacOSX::v10_11 1.0 Devel::AssertOS::MacOSX::v10_12 1.0 Devel::AssertOS::MacOSX::v10_2 1.0 Devel::AssertOS::MacOSX::v10_3 1.0 Devel::AssertOS::MacOSX::v10_4 1.4 Devel::AssertOS::MacOSX::v10_5 1.0 Devel::AssertOS::MacOSX::v10_6 1.0 Devel::AssertOS::MacOSX::v10_7 1.0 Devel::AssertOS::MacOSX::v10_8 1.0 Devel::AssertOS::MacOSX::v10_9 1.0 Devel::AssertOS::MicrosoftWindows 1.3 Devel::AssertOS::MidnightBSD 1.1 Devel::AssertOS::Minix 1.0 Devel::AssertOS::MirOSBSD 1.2 Devel::AssertOS::MPEiX 1.2 Devel::AssertOS::MSDOS 1.2 Devel::AssertOS::MSWin32 1.3 Devel::AssertOS::NetBSD 1.2 Devel::AssertOS::Netware 1.2 Devel::AssertOS::NeXT 1.2 Devel::AssertOS::OpenBSD 1.2 Devel::AssertOS::OS2 1.1 Devel::AssertOS::OS390 1.2 Devel::AssertOS::OS400 1.2 Devel::AssertOS::OSF 1.2 Devel::AssertOS::OSFeatures::POSIXShellRedirection 1.4 Devel::AssertOS::POSIXBC 1.2 Devel::AssertOS::QNX 1.2 Devel::AssertOS::QNX::Neutrino 1.1 Devel::AssertOS::QNX::v4 1.1 Devel::AssertOS::Realtime 1.2 Devel::AssertOS::RISCOS 1.2 Devel::AssertOS::SCO 1.2 Devel::AssertOS::Solaris 1.2 Devel::AssertOS::Sun 1.3 Devel::AssertOS::SunOS 1.2 Devel::AssertOS::SysVr4 1.2 Devel::AssertOS::SysVr5 1.2 Devel::AssertOS::Unicos 1.2 Devel::AssertOS::Unix 1.6 Devel::AssertOS::VMESA 1.2 Devel::AssertOS::VMS 1.2 Devel::AssertOS::VOS 1.2 Devel::Caller 2.06 Devel::CheckBin 0.04 Devel::CheckCompiler 0.07 Devel::CheckLib 1.13 Devel::CheckOS 1.81 Devel::Confess 0.009004 Devel::Confess::_Util 
unknown Devel::Confess::Builtin 0.009004 Devel::Confess::Source unknown Devel::Cover 1.30 Devel::Cover::Annotation::Git 1.30 Devel::Cover::Annotation::Random 1.30 Devel::Cover::Annotation::Svk 1.30 Devel::Cover::Branch 1.30 Devel::Cover::Collection 1.30 Devel::Cover::Condition 1.30 Devel::Cover::Condition_and_2 1.30 Devel::Cover::Condition_and_3 1.30 Devel::Cover::Condition_or_2 1.30 Devel::Cover::Condition_or_3 1.30 Devel::Cover::Condition_xor_4 1.30 Devel::Cover::Criterion 1.30 Devel::Cover::DB 1.30 Devel::Cover::DB::Criterion 1.30 Devel::Cover::DB::Digests 1.30 Devel::Cover::DB::File 1.30 Devel::Cover::DB::IO 1.30 Devel::Cover::DB::IO::Base 1.30 Devel::Cover::DB::IO::JSON 1.30 Devel::Cover::DB::IO::Sereal 1.30 Devel::Cover::DB::IO::Storable 1.30 Devel::Cover::DB::Structure 1.30 Devel::Cover::Html_Common 1.30 Devel::Cover::Inc 1.30 Devel::Cover::Op 1.30 Devel::Cover::Pod 1.30 Devel::Cover::Report::Compilation 1.30 Devel::Cover::Report::Html 1.30 Devel::Cover::Report::Html_basic 1.30 Devel::Cover::Report::Html_minimal 1.30 Devel::Cover::Report::Html_subtle 1.30 Devel::Cover::Report::Json 1.30 Devel::Cover::Report::Sort 1.30 Devel::Cover::Report::Text 1.30 Devel::Cover::Report::Text2 1.30 Devel::Cover::Report::Vim 1.30 Devel::Cover::Statement 1.30 Devel::Cover::Subroutine 1.30 Devel::Cover::Test 1.30 Devel::Cover::Time 1.30 Devel::Cover::Util 1.30 Devel::Cover::Web 1.30 Devel::Cycle 1.12 Devel::Declare 0.006019 Devel::Declare::Context::Simple 0.006019 Devel::Declare::MethodInstaller::Simple 0.006019 Devel::Dwarn unknown Devel::FindPerl 0.014 Devel::GlobalDestruction 0.14 Devel::GlobalPhase 0.003003 Devel::GraphVizProf 2.24 Devel::Hide 0.0010 Devel::InnerPackage 0.4 Devel::Leak 0.03 Devel::LexAlias 0.05 Devel::MAT 0.36 Devel::MAT::Context 0.36 Devel::MAT::Dumper 0.36 Devel::MAT::Dumpfile 0.36 Devel::MAT::Graph 0.36 Devel::MAT::InternalTools 0.36 Devel::MAT::SV 0.36 Devel::MAT::Tool 0.36 Devel::MAT::Tool::Callstack 0.36 Devel::MAT::Tool::Count 0.36 Devel::MAT::Tool::Find 0.36 Devel::MAT::Tool::Identify 0.36 Devel::MAT::Tool::Inrefs 0.36 Devel::MAT::Tool::IO 0.36 Devel::MAT::Tool::Outrefs 0.36 Devel::MAT::Tool::Reachability 0.36 Devel::MAT::Tool::Roots 0.36 Devel::MAT::Tool::Show 0.36 Devel::MAT::Tool::Sizes 0.36 Devel::MAT::Tool::Symbols 0.36 Devel::NYTProf 6.06 Devel::NYTProf::Apache 4.00 Devel::NYTProf::Constants unknown Devel::NYTProf::Core 6.06 Devel::NYTProf::Data 4.02 Devel::NYTProf::FileHandle unknown Devel::NYTProf::FileInfo unknown Devel::NYTProf::Reader 4.06 Devel::NYTProf::ReadStream 4.00 Devel::NYTProf::Run unknown Devel::NYTProf::SubCallInfo unknown Devel::NYTProf::SubInfo unknown Devel::NYTProf::Util 4.00 Devel::OverloadInfo 0.005 Devel::PartialDump 0.20 Devel::PatchPerl 1.52 Devel::PatchPerl::Hints 1.52 Devel::PatchPerl::Plugin 1.52 Devel::REPL 1.003028 Devel::REPL::Error 1.003028 Devel::REPL::Meta::Plugin 1.003028 Devel::REPL::Plugin 1.003028 Devel::REPL::Plugin::B::Concise 1.003028 Devel::REPL::Plugin::Carp::REPL 0.18 Devel::REPL::Plugin::Colors 1.003028 Devel::REPL::Plugin::Commands 1.003028 Devel::REPL::Plugin::Completion 1.003028 Devel::REPL::Plugin::CompletionDriver::Globals 1.003028 Devel::REPL::Plugin::CompletionDriver::INC 1.003028 Devel::REPL::Plugin::CompletionDriver::Keywords 1.003028 Devel::REPL::Plugin::CompletionDriver::LexEnv 1.003028 Devel::REPL::Plugin::CompletionDriver::Methods 1.003028 Devel::REPL::Plugin::CompletionDriver::Turtles 1.003028 Devel::REPL::Plugin::DDC 1.003028 Devel::REPL::Plugin::DDS 1.003028 Devel::REPL::Plugin::DumpHistory 1.003028 
Devel::REPL::Plugin::FancyPrompt 1.003028 Devel::REPL::Plugin::FindVariable 1.003028 Devel::REPL::Plugin::History 1.003028 Devel::REPL::Plugin::Interrupt 1.003028 Devel::REPL::Plugin::LexEnv 1.003028 Devel::REPL::Plugin::MultiLine::PPI 1.003028 Devel::REPL::Plugin::Nopaste 1.003028 Devel::REPL::Plugin::OutputCache 1.003028 Devel::REPL::Plugin::Packages 1.003028 Devel::REPL::Plugin::Peek 1.003028 Devel::REPL::Plugin::PPI 1.003028 Devel::REPL::Plugin::ReadLineHistory 1.003028 Devel::REPL::Plugin::Refresh 1.003028 Devel::REPL::Plugin::Selenium 1.36 Devel::REPL::Plugin::ShowClass 1.003028 Devel::REPL::Plugin::Timing 1.003028 Devel::REPL::Plugin::Turtles 1.003028 Devel::REPL::Profile 1.003028 Devel::REPL::Profile::Default 1.003028 Devel::REPL::Profile::Minimal 1.003028 Devel::REPL::Profile::Standard 1.003028 Devel::REPL::Script 1.003028 Devel::StackTrace 2.03 Devel::StackTrace::AsHTML 0.15 Devel::StackTrace::Frame 2.03 Devel::StackTrace::WithLexicals 2.01 Devel::StackTrace::WithLexicals::Frame unknown Devel::Symdump 2.18 Devel::Symdump::Export unknown Devel::TypeTiny::Perl56Compat 1.002002 Devel::TypeTiny::Perl58Compat 1.002002 Digest::HMAC 1.03 Digest::HMAC_MD5 1.01 Digest::HMAC_SHA1 1.03 Digest::JHash 0.10 Digest::Perl::MD5 1.9 Digest::SHA1 2.13 Dist::CheckConflicts 0.11 Dist::Metadata 0.927 Dist::Metadata::Archive 0.927 Dist::Metadata::Dir 0.927 Dist::Metadata::Dist 0.927 Dist::Metadata::Struct 0.927 Dist::Metadata::Tar 0.927 Dist::Metadata::Zip 0.927 Dist::Zilla::PluginBundle::Example unknown Email::Abstract 3.008 Email::Abstract::EmailMIME 3.008 Email::Abstract::EmailSimple 3.008 Email::Abstract::MailInternet 3.008 Email::Abstract::MailMessage 3.008 Email::Abstract::MIMEEntity 3.008 Email::Abstract::Plugin 3.008 Email::Address 1.909 Email::Address::XS 1.04 Email::Date::Format 1.005 Email::MessageID 1.406 Email::MIME 1.946 Email::MIME::ContentType 1.022 Email::MIME::Creator 1.946 Email::MIME::Encode 1.946 Email::MIME::Encodings 1.315 Email::MIME::Header 1.946 Email::MIME::Header::AddressList 1.946 Email::MIME::Kit 3.000006 Email::MIME::Kit::Assembler::Standard 3.000006 Email::MIME::Kit::KitReader::Dir 3.000006 Email::MIME::Kit::ManifestReader::JSON 3.000006 Email::MIME::Kit::ManifestReader::YAML 3.000006 Email::MIME::Kit::Renderer::TestRenderer 3.000006 Email::MIME::Kit::Role::Assembler 3.000006 Email::MIME::Kit::Role::Component 3.000006 Email::MIME::Kit::Role::KitReader 3.000006 Email::MIME::Kit::Role::ManifestDesugarer 3.000006 Email::MIME::Kit::Role::ManifestReader 3.000006 Email::MIME::Kit::Role::Renderer 3.000006 Email::MIME::Kit::Role::Validator 3.000006 Email::MIME::Modifier 1.946 Email::Sender 1.300031 Email::Sender::Failure 1.300031 Email::Sender::Failure::Multi 1.300031 Email::Sender::Failure::Permanent 1.300031 Email::Sender::Failure::Temporary 1.300031 Email::Sender::Manual 1.300031 Email::Sender::Manual::QuickStart 1.300031 Email::Sender::Role::CommonSending 1.300031 Email::Sender::Role::HasMessage 1.300031 Email::Sender::Simple 1.300031 Email::Sender::Success 1.300031 Email::Sender::Success::Partial 1.300031 Email::Sender::Transport 1.300031 Email::Sender::Transport::DevNull 1.300031 Email::Sender::Transport::Failable 1.300031 Email::Sender::Transport::Maildir 1.300031 Email::Sender::Transport::Mbox 1.300031 Email::Sender::Transport::Print 1.300031 Email::Sender::Transport::Sendmail 1.300031 Email::Sender::Transport::SMTP 1.300031 Email::Sender::Transport::SMTP::Persistent 1.300031 Email::Sender::Transport::Test 1.300031 Email::Sender::Transport::Wrapper 1.300031 
Email::Sender::Util 1.300031 Email::Simple 2.216 Email::Simple::Creator 2.216 Email::Simple::Header 2.216 Email::Valid 1.202 Encode 2.98 Encode::Alias 2.24 Encode::Byte 2.04 Encode::CJKConstants 2.02 Encode::CN 2.03 Encode::CN::HZ 2.10 Encode::Config 2.05 Encode::ConfigLocal 1532079134 Encode::EBCDIC 2.02 Encode::Encoder 2.03 Encode::Encoding 2.08 Encode::GSM0338 2.07 Encode::Guess 2.07 Encode::JP 2.04 Encode::JP::H2Z 2.02 Encode::JP::JIS7 2.08 Encode::KR 2.03 Encode::KR::2022_KR 2.04 Encode::Locale 1.05 Encode::MIME::Header 2.28 Encode::MIME::Header::ISO_2022_JP 1.09 Encode::MIME::Name 1.03 Encode::Symbol 2.02 Encode::TW 2.03 Encode::Unicode 2.17 Encode::Unicode::UTF7 2.10 encoding 2.22 Env::Path 0.19 Error 0.17026 Error::Simple 0.17026 Error::TypeTiny 1.002002 Error::TypeTiny::Assertion 1.002002 Error::TypeTiny::Compilation 1.002002 Error::TypeTiny::WrongNumberOfParameters 1.002002 Eval::Closure 0.14 Eval::TypeTiny 1.002002 Eval::WithLexicals 1.003006 Eval::WithLexicals::WithHintPersistence 1.003006 Excel::Writer::XLSX 0.98 Excel::Writer::XLSX::Chart 0.98 Excel::Writer::XLSX::Chart::Area 0.98 Excel::Writer::XLSX::Chart::Bar 0.98 Excel::Writer::XLSX::Chart::Column 0.98 Excel::Writer::XLSX::Chart::Doughnut 0.98 Excel::Writer::XLSX::Chart::Line 0.98 Excel::Writer::XLSX::Chart::Pie 0.98 Excel::Writer::XLSX::Chart::Radar 0.98 Excel::Writer::XLSX::Chart::Scatter 0.98 Excel::Writer::XLSX::Chart::Stock 0.98 Excel::Writer::XLSX::Chartsheet 0.98 Excel::Writer::XLSX::Drawing 0.98 Excel::Writer::XLSX::Examples 0.98 Excel::Writer::XLSX::Format 0.98 Excel::Writer::XLSX::Package::App 0.98 Excel::Writer::XLSX::Package::Comments 0.98 Excel::Writer::XLSX::Package::ContentTypes 0.98 Excel::Writer::XLSX::Package::Core 0.98 Excel::Writer::XLSX::Package::Custom 0.98 Excel::Writer::XLSX::Package::Packager 0.98 Excel::Writer::XLSX::Package::Relationships 0.98 Excel::Writer::XLSX::Package::SharedStrings 0.98 Excel::Writer::XLSX::Package::Styles 0.98 Excel::Writer::XLSX::Package::Table 0.98 Excel::Writer::XLSX::Package::Theme 0.98 Excel::Writer::XLSX::Package::VML 0.98 Excel::Writer::XLSX::Package::XMLwriter 0.98 Excel::Writer::XLSX::Shape 0.98 Excel::Writer::XLSX::Utility 0.98 Excel::Writer::XLSX::Workbook 0.98 Excel::Writer::XLSX::Worksheet 0.98 Exception::Assertion 0.0504 Exception::Base 0.2501 Exception::Class 1.44 Exception::Class::Base 1.44 Expect 1.35 Expect::Simple 0.04 Exporter::Declare 0.114 Exporter::Declare::Export unknown Exporter::Declare::Export::Alias unknown Exporter::Declare::Export::Generator unknown Exporter::Declare::Export::Sub unknown Exporter::Declare::Export::Variable unknown Exporter::Declare::Meta unknown Exporter::Declare::Specs unknown Exporter::Lite 0.08 Exporter::Shiny 1.002001 Exporter::Tidy 0.08 Exporter::Tiny 1.002001 ExtUtils::CChecker 0.10 ExtUtils::Config 0.008 ExtUtils::CppGuess 0.12 ExtUtils::Depends 0.405 ExtUtils::Helpers 0.026 ExtUtils::Helpers::Unix 0.026 ExtUtils::Helpers::VMS 0.026 ExtUtils::Helpers::Windows 0.026 ExtUtils::InstallPaths 0.012 ExtUtils::MakeMaker::CPANfile 0.08 ExtUtils::Manifest 1.71 ExtUtils::PkgConfig 1.16 FAlite unknown FCGI 0.78 FCGI::ProcManager 0.28 FCGI::ProcManager::Constrained unknown Fennec::Lite 0.004 Fh 4.38 File::ChangeNotify 0.28 File::ChangeNotify::Event 0.28 File::ChangeNotify::Watcher 0.28 File::ChangeNotify::Watcher::Default 0.28 File::ChangeNotify::Watcher::Inotify 0.28 File::ChangeNotify::Watcher::KQueue 0.28 File::Copy::Link 0.06 File::Copy::Recursive 0.44 File::Copy::Recursive::Reduced 0.006 File::Find::Rule 0.34 File::Grep 0.02 
File::HomeDir 1.004 File::HomeDir::Darwin 1.004 File::HomeDir::Darwin::Carbon 1.004 File::HomeDir::Darwin::Cocoa 1.004 File::HomeDir::Driver 1.004 File::HomeDir::FreeDesktop 1.004 File::HomeDir::MacOS9 1.004 File::HomeDir::Test 1.004 File::HomeDir::Unix 1.004 File::HomeDir::Windows 1.004 File::Listing 6.04 File::Map 0.65 File::Next 1.16 File::NFSLock 1.27 File::Path 2.15 File::pushd 1.016 File::Remove 1.57 File::Share 0.25 File::ShareDir 1.116 File::ShareDir::Install 0.13 File::Slurp 9999.19 File::Slurp::Tiny 0.004 File::Slurper 0.012 File::Spec 3.74 File::Spec::AmigaOS 3.74 File::Spec::Cygwin 3.74 File::Spec::Epoc 3.74 File::Spec::Functions 3.74 File::Spec::Link 0.073 File::Spec::Mac 3.74 File::Spec::Native 1.004 File::Spec::OS2 3.74 File::Spec::Unix 3.74 File::Spec::VMS 3.74 File::Spec::Win32 3.74 File::Temp 0.2308 File::Which 1.22 Filesys::Notify::Simple 0.13 Font::TTF 1.06 Font::TTF::AATKern unknown Font::TTF::AATutils unknown Font::TTF::Anchor unknown Font::TTF::Bsln unknown Font::TTF::Cmap unknown Font::TTF::Coverage unknown Font::TTF::Cvt_ 0.0001 Font::TTF::Delta unknown Font::TTF::DSIG unknown Font::TTF::Dumper unknown Font::TTF::EBDT unknown Font::TTF::EBLC unknown Font::TTF::Fdsc unknown Font::TTF::Feat unknown Font::TTF::Features::Cvar unknown Font::TTF::Features::Size unknown Font::TTF::Features::Sset unknown Font::TTF::Fmtx unknown Font::TTF::Font 0.39 Font::TTF::Fpgm 0.0001 Font::TTF::GDEF unknown Font::TTF::Glat unknown Font::TTF::Gloc unknown Font::TTF::Glyf unknown Font::TTF::Glyph unknown Font::TTF::GPOS unknown Font::TTF::GrFeat unknown Font::TTF::GSUB unknown Font::TTF::Hdmx unknown Font::TTF::Head unknown Font::TTF::Hhea unknown Font::TTF::Hmtx unknown Font::TTF::Kern unknown Font::TTF::Kern::ClassArray unknown Font::TTF::Kern::CompactClassArray unknown Font::TTF::Kern::OrderedList unknown Font::TTF::Kern::StateTable unknown Font::TTF::Kern::Subtable unknown Font::TTF::Loca unknown Font::TTF::LTSH unknown Font::TTF::Maxp unknown Font::TTF::Mort unknown Font::TTF::Mort::Chain unknown Font::TTF::Mort::Contextual unknown Font::TTF::Mort::Insertion unknown Font::TTF::Mort::Ligature unknown Font::TTF::Mort::Noncontextual unknown Font::TTF::Mort::Rearrangement unknown Font::TTF::Mort::Subtable unknown Font::TTF::Name 1.1 Font::TTF::OldCmap unknown Font::TTF::OldMort unknown Font::TTF::OS_2 unknown Font::TTF::OTTags unknown Font::TTF::PCLT unknown Font::TTF::Post 0.01 Font::TTF::Prep 0.0001 Font::TTF::Prop unknown Font::TTF::PSNames unknown Font::TTF::Segarr 0.0001 Font::TTF::Silf unknown Font::TTF::Sill unknown Font::TTF::Table 0.0001 Font::TTF::Ttc 0.0001 Font::TTF::Ttopen unknown Font::TTF::Utils 0.0001 Font::TTF::Vhea unknown Font::TTF::Vmtx unknown Font::TTF::Win32 unknown Font::TTF::Woff unknown Font::TTF::Woff::MetaData unknown Font::TTF::Woff::PrivateData unknown Font::TTF::XMLparse unknown forks 0.36 forks::shared 0.36 Future 0.38 Future::Mutex 0.38 Future::Utils 0.38 GD 2.68 GD::Graph 1.54 GD::Graph::area unknown GD::Graph::axestype unknown GD::Graph::bars unknown GD::Graph::colour unknown GD::Graph::Data unknown GD::Graph::Error unknown GD::Graph::hbars unknown GD::Graph::lines unknown GD::Graph::linespoints unknown GD::Graph::mixed unknown GD::Graph::pie unknown GD::Graph::points unknown GD::Graph::utils unknown GD::Group 1 GD::Image 2.67 GD::Polygon unknown GD::Polyline 0.2 GD::Simple unknown GD::SVG 0.33 GD::Text 0.86 GD::Text::Align unknown GD::Text::Wrap unknown Getopt::Long::Descriptive 0.102 Getopt::Long::Descriptive::Opts 0.102 
Getopt::Long::Descriptive::Usage 0.102 Graph 0.9704 Graph::AdjacencyMap unknown Graph::AdjacencyMap::Heavy unknown Graph::AdjacencyMap::Light unknown Graph::AdjacencyMap::Vertex unknown Graph::AdjacencyMatrix unknown Graph::Attribute unknown Graph::BitMatrix unknown Graph::Directed unknown Graph::Matrix unknown Graph::MSTHeapElem unknown Graph::Reader 2.09 Graph::Reader::Dot 2.09 Graph::Reader::HTK 2.09 Graph::Reader::XML 2.09 Graph::ReadWrite 2.09 Graph::SPTHeapElem unknown Graph::TransitiveClosure unknown Graph::TransitiveClosure::Matrix unknown Graph::Traversal unknown Graph::Traversal::BFS unknown Graph::Traversal::DFS unknown Graph::Undirected unknown Graph::UnionFind unknown Graph::Writer 2.09 Graph::Writer::daVinci 2.09 Graph::Writer::Dot 2.09 Graph::Writer::HTK 2.09 Graph::Writer::VCG 2.09 Graph::Writer::XML 2.09 GraphViz 2.24 GraphViz::Data::Grapher 2.24 GraphViz::No 2.24 GraphViz::Parse::RecDescent 2.24 GraphViz::Parse::Yacc 2.24 GraphViz::Parse::Yapp 2.24 GraphViz::Regex 2.24 GraphViz::Small 2.24 GraphViz::XML 2.24 Hash::AutoHash 1.17 Hash::AutoHash::Args 1.18 Hash::AutoHash::Args::V0 1.18 Hash::Merge 0.300 Hash::Merge::Simple 0.051 Hash::MoreUtils 0.06 Hash::MultiValue 0.16 Hash::Util::FieldHash::Compat 0.11 Hash::Util::FieldHash::Compat::Heavy 0.11 Heap071::Elem unknown Heap071::Fibonacci unknown Hook::LexWrap 0.26 HPC::Runner 2.48 HPC::Runner::Scheduler 0.09 HPC::Runner::Slurm 2.58 HTML::AsSubs 5.07 HTML::Element 5.07 HTML::Element::traverse 5.07 HTML::Entities 3.69 HTML::Filter 3.72 HTML::Form 6.03 HTML::FormHandler 0.40068 HTML::FormHandler::Base 0.40068 HTML::FormHandler::Blocks 0.40068 HTML::FormHandler::BuildFields 0.40068 HTML::FormHandler::BuildPages 0.40068 HTML::FormHandler::Field 0.40068 HTML::FormHandler::Field::AddElement 0.40068 HTML::FormHandler::Field::Boolean 0.40068 HTML::FormHandler::Field::BoolSelect 0.40068 HTML::FormHandler::Field::Button 0.40068 HTML::FormHandler::Field::Captcha 0.40068 HTML::FormHandler::Field::Checkbox 0.40068 HTML::FormHandler::Field::Compound 0.40068 HTML::FormHandler::Field::Date 0.40068 HTML::FormHandler::Field::DateMDY 0.40068 HTML::FormHandler::Field::DateTime 0.40068 HTML::FormHandler::Field::Display 0.40068 HTML::FormHandler::Field::Duration 0.40068 HTML::FormHandler::Field::Email 0.40068 HTML::FormHandler::Field::File 0.40068 HTML::FormHandler::Field::Float 0.40068 HTML::FormHandler::Field::Hidden 0.40068 HTML::FormHandler::Field::Hour 0.40068 HTML::FormHandler::Field::Integer 0.40068 HTML::FormHandler::Field::IntRange 0.40068 HTML::FormHandler::Field::Minute 0.40068 HTML::FormHandler::Field::Money 0.40068 HTML::FormHandler::Field::Month 0.40068 HTML::FormHandler::Field::MonthDay 0.40068 HTML::FormHandler::Field::MonthName 0.40068 HTML::FormHandler::Field::Multiple 0.40068 HTML::FormHandler::Field::Nested 0.40068 HTML::FormHandler::Field::NonEditable 0.40068 HTML::FormHandler::Field::NoValue 0.40068 HTML::FormHandler::Field::Password 0.40068 HTML::FormHandler::Field::PasswordConf 0.40068 HTML::FormHandler::Field::PosInteger 0.40068 HTML::FormHandler::Field::PrimaryKey 0.40068 HTML::FormHandler::Field::Repeatable 0.40068 HTML::FormHandler::Field::RequestToken 0.40068 HTML::FormHandler::Field::Reset 0.40068 HTML::FormHandler::Field::Result 0.40068 HTML::FormHandler::Field::RmElement 0.40068 HTML::FormHandler::Field::Role::RequestToken 0.40068 HTML::FormHandler::Field::Second 0.40068 HTML::FormHandler::Field::Select 0.40068 HTML::FormHandler::Field::SelectCSV 0.40068 HTML::FormHandler::Field::Submit 0.40068 
HTML::FormHandler::Field::Text 0.40068 HTML::FormHandler::Field::TextArea 0.40068 HTML::FormHandler::Field::TextCSV 0.40068 HTML::FormHandler::Field::Upload 0.40068 HTML::FormHandler::Field::Weekday 0.40068 HTML::FormHandler::Field::Year 0.40068 HTML::FormHandler::Fields 0.40068 HTML::FormHandler::Foo 0.40068 HTML::FormHandler::I18N 0.40068 HTML::FormHandler::I18N::ar_kw 0.40068 HTML::FormHandler::I18N::bg_bg 0.40068 HTML::FormHandler::I18N::ca_es 0.40068 HTML::FormHandler::I18N::cs_cz 0.40068 HTML::FormHandler::I18N::de_de 0.40068 HTML::FormHandler::I18N::en_us 0.40068 HTML::FormHandler::I18N::es_es 0.40068 HTML::FormHandler::I18N::hu_hu 0.40068 HTML::FormHandler::I18N::it_it 0.40068 HTML::FormHandler::I18N::ja_jp 0.40068 HTML::FormHandler::I18N::pt_br 0.40068 HTML::FormHandler::I18N::ru_ru 0.40068 HTML::FormHandler::I18N::sv_se 0.40068 HTML::FormHandler::I18N::tr_tr 0.40068 HTML::FormHandler::I18N::ua_ua 0.40068 HTML::FormHandler::InitResult 0.40068 HTML::FormHandler::Merge 0.40068 HTML::FormHandler::Model 0.40068 HTML::FormHandler::Model::Object 0.40068 HTML::FormHandler::Moose 0.40068 HTML::FormHandler::Moose::Role 0.40068 HTML::FormHandler::Page 0.40068 HTML::FormHandler::Page::Simple 0.40068 HTML::FormHandler::Pages 0.40068 HTML::FormHandler::Render::RepeatableJs 0.40068 HTML::FormHandler::Render::Simple 0.40068 HTML::FormHandler::Render::Table 0.40068 HTML::FormHandler::Render::Util 0.40068 HTML::FormHandler::Render::WithTT 0.40068 HTML::FormHandler::Result 0.40068 HTML::FormHandler::Result::Role 0.40068 HTML::FormHandler::Test 0.40068 HTML::FormHandler::TraitFor::Captcha 0.40068 HTML::FormHandler::TraitFor::I18N 0.40068 HTML::FormHandler::TraitFor::Types 0.40068 HTML::FormHandler::Traits 0.40068 HTML::FormHandler::Types 0.40068 HTML::FormHandler::Validate 0.40068 HTML::FormHandler::Widget::ApplyRole 0.40068 HTML::FormHandler::Widget::Block 0.40068 HTML::FormHandler::Widget::Block::Bootstrap 0.40068 HTML::FormHandler::Widget::Field::Button 0.40068 HTML::FormHandler::Widget::Field::ButtonTag 0.40068 HTML::FormHandler::Widget::Field::Captcha 0.40068 HTML::FormHandler::Widget::Field::Checkbox 0.40068 HTML::FormHandler::Widget::Field::CheckboxGroup 0.40068 HTML::FormHandler::Widget::Field::Compound 0.40068 HTML::FormHandler::Widget::Field::Hidden 0.40068 HTML::FormHandler::Widget::Field::HorizCheckboxGroup 0.40068 HTML::FormHandler::Widget::Field::NoRender 0.40068 HTML::FormHandler::Widget::Field::Password 0.40068 HTML::FormHandler::Widget::Field::RadioGroup 0.40068 HTML::FormHandler::Widget::Field::Repeatable 0.40068 HTML::FormHandler::Widget::Field::Reset 0.40068 HTML::FormHandler::Widget::Field::Role::HTMLAttributes 0.40068 HTML::FormHandler::Widget::Field::Role::SelectedOption 0.40068 HTML::FormHandler::Widget::Field::Select 0.40068 HTML::FormHandler::Widget::Field::Span 0.40068 HTML::FormHandler::Widget::Field::Submit 0.40068 HTML::FormHandler::Widget::Field::Text 0.40068 HTML::FormHandler::Widget::Field::Textarea 0.40068 HTML::FormHandler::Widget::Field::Upload 0.40068 HTML::FormHandler::Widget::Form::Role::HTMLAttributes 0.40068 HTML::FormHandler::Widget::Form::Simple 0.40068 HTML::FormHandler::Widget::Form::Table 0.40068 HTML::FormHandler::Widget::Theme::Bootstrap 0.40068 HTML::FormHandler::Widget::Theme::Bootstrap3 0.40068 HTML::FormHandler::Widget::Theme::BootstrapFormMessages 0.40068 HTML::FormHandler::Widget::Wrapper::Base 0.40068 HTML::FormHandler::Widget::Wrapper::Bootstrap 0.40068 HTML::FormHandler::Widget::Wrapper::Bootstrap3 0.40068 
HTML::FormHandler::Widget::Wrapper::Fieldset 0.40068 HTML::FormHandler::Widget::Wrapper::None 0.40068 HTML::FormHandler::Widget::Wrapper::Simple 0.40068 HTML::FormHandler::Widget::Wrapper::SimpleInline 0.40068 HTML::FormHandler::Widget::Wrapper::Table 0.40068 HTML::FormHandler::Widget::Wrapper::TableInline 0.40068 HTML::FormHandler::Wizard 0.40068 HTML::HeadParser 3.71 HTML::LinkExtor 3.69 HTML::Parse 5.07 HTML::Parser 3.72 HTML::Perlinfo 1.69 HTML::Perlinfo::Apache unknown HTML::Perlinfo::Base unknown HTML::Perlinfo::Common unknown HTML::Perlinfo::General unknown HTML::Perlinfo::Loaded 1.02 HTML::Perlinfo::Modules 1.19 HTML::PullParser 3.57 HTML::TableExtract 2.15 HTML::Tagset 3.20 HTML::TokeParser 3.69 HTML::Tree 5.07 HTML::TreeBuilder 5.07 HTTP::Body 1.22 HTTP::Body::MultiPart 1.22 HTTP::Body::OctetStream 1.22 HTTP::Body::UrlEncoded 1.22 HTTP::Body::XForms 1.22 HTTP::Body::XFormsMultipart 1.22 HTTP::Config 6.18 HTTP::CookieJar 0.008 HTTP::CookieJar::LWP 0.008 HTTP::Cookies 6.04 HTTP::Cookies::Microsoft 6.04 HTTP::Cookies::Netscape 6.04 HTTP::Daemon 6.01 HTTP::Date 6.02 HTTP::Entity::Parser 0.21 HTTP::Entity::Parser::JSON unknown HTTP::Entity::Parser::MultiPart unknown HTTP::Entity::Parser::OctetStream unknown HTTP::Entity::Parser::UrlEncoded unknown HTTP::Headers 6.18 HTTP::Headers::Auth 6.18 HTTP::Headers::ETag 6.18 HTTP::Headers::Fast 0.21 HTTP::Headers::Util 6.18 HTTP::Message 6.18 HTTP::Message::PSGI unknown HTTP::MultiPartParser 0.02 HTTP::Negotiate 6.01 HTTP::Parser::XS 0.17 HTTP::Parser::XS::PP unknown HTTP::Request 6.18 HTTP::Request::AsCGI 1.2 HTTP::Request::Common 6.18 HTTP::Response 6.18 HTTP::Server::PSGI unknown HTTP::Server::PSGI::Net::Server::PreFork unknown HTTP::Server::Simple 0.52 HTTP::Server::Simple::CGI unknown HTTP::Server::Simple::CGI::Environment unknown HTTP::Server::Simple::PSGI 0.16 HTTP::Status 6.18 HTTP::Thin 0.006 HTTP::Tinyish 0.14 HTTP::Tinyish::Base unknown HTTP::Tinyish::Curl unknown HTTP::Tinyish::HTTPTiny unknown HTTP::Tinyish::LWP unknown HTTP::Tinyish::Wget unknown HTTP::XSCookies 0.000021 Image::PNG 0.23 Image::PNG::Const 0.45 Image::PNG::Container 0.23 Image::PNG::Libpng 0.45 Image::PNG::Util unknown Import::Into 1.002005 Importer 0.025 inc::Module::Install 1.19 inc::Module::Install::DSL 1.19 indirect 0.38 Inline 0.80 Inline::C 0.78 Inline::C::Parser unknown Inline::C::Parser::Pegex unknown Inline::C::Parser::Pegex::AST unknown Inline::C::Parser::Pegex::Grammar unknown Inline::C::Parser::RecDescent unknown Inline::C::Parser::RegExp unknown Inline::denter unknown Inline::Foo 0.80 Inline::MakeMaker 0.80 Inline::MakePdlppInstallable unknown Inline::Pdlpp 0.4 IO::All 0.87 IO::All::Base unknown IO::All::DBM unknown IO::All::Dir unknown IO::All::File unknown IO::All::Filesys unknown IO::All::Link unknown IO::All::MLDBM unknown IO::All::Pipe unknown IO::All::Socket unknown IO::All::STDIO unknown IO::All::String unknown IO::All::Temp unknown IO::Async 0.72 IO::Async::Channel 0.72 IO::Async::Debug 0.72 IO::Async::File 0.72 IO::Async::FileStream 0.72 IO::Async::Function 0.72 IO::Async::Future 0.72 IO::Async::Handle 0.72 IO::Async::Internals::ChildManager 0.72 IO::Async::Listener 0.72 IO::Async::Loop 0.72 IO::Async::Loop::Poll 0.72 IO::Async::Loop::Select 0.72 IO::Async::LoopTests 0.72 IO::Async::Notifier 0.72 IO::Async::OS 0.72 IO::Async::OS::cygwin 0.72 IO::Async::OS::linux 0.72 IO::Async::OS::MSWin32 0.72 IO::Async::PID 0.72 IO::Async::Process 0.72 IO::Async::Protocol 0.72 IO::Async::Protocol::LineStream 0.72 IO::Async::Protocol::Stream 0.72 
IO::Async::Resolver 0.72 IO::Async::Routine 0.72 IO::Async::Signal 0.72 IO::Async::Socket 0.72 IO::Async::Stream 0.72 IO::Async::Test 0.72 IO::Async::Timer 0.72 IO::Async::Timer::Absolute 0.72 IO::Async::Timer::Countdown 0.72 IO::Async::Timer::Periodic 0.72 IO::AtomicFile 2.111 IO::CaptureOutput 1.1104 IO::HTML 1.001 IO::InnerFile 2.111 IO::Interactive 1.022 IO::Lines 2.111 IO::Pipely 0.005 IO::Prompt 0.997004 IO::Pty 1.12 IO::Scalar 2.111 IO::ScalarArray 2.111 IO::SessionData 1.03 IO::SessionSet unknown IO::Socket::SSL 2.058 IO::Socket::SSL::Intercept 2.056 IO::Socket::SSL::PublicSuffix unknown IO::Socket::SSL::Utils 2.014 IO::Socket::Timeout 0.32 IO::String 1.08 IO::Stringy 2.111 IO::TieCombine 1.005 IO::TieCombine::Handle 1.005 IO::TieCombine::Scalar 1.005 IO::Tty 1.12 IO::Tty::Constant unknown IO::Wrap 2.111 IO::WrapTie 2.111 IPC::Run 20180523.0 IPC::Run3 0.048 IPC::Run3::ProfArrayBuffer 0.048 IPC::Run3::ProfLogger 0.048 IPC::Run3::ProfLogReader 0.048 IPC::Run3::ProfPP 0.048 IPC::Run3::ProfReporter 0.048 IPC::Run::Debug 20180523.0 IPC::Run::IO 20180523.0 IPC::Run::Timer 20180523.0 IPC::Run::Win32Helper 20180523.0 IPC::Run::Win32IO 20180523.0 IPC::Run::Win32Pump 20180523.0 IPC::ShareLite 0.17 IPC::System::Simple 1.25 JSON 2.97001 JSON::Any 1.39 JSON::MaybeXS 1.004000 Lexical::Persistence 1.020 lib::core::only unknown Lingua::EN::FindNumber 1.32 Lingua::EN::Inflect 1.903 Lingua::EN::Inflect::Number 1.12 Lingua::EN::Inflect::Phrase 0.20 Lingua::EN::Number::IsOrdinal 0.05 Lingua::EN::Tagger 0.29 Lingua::EN::Words2Nums unknown Lingua::GL::Stemmer 0.02 Lingua::PT::Stemmer 0.02 Lingua::Stem 0.84 Lingua::Stem::AutoLoader 1.02 Lingua::Stem::Da 1.01 Lingua::Stem::De 1.01 Lingua::Stem::En 2.16 Lingua::Stem::EnBroken 2.13 Lingua::Stem::Fr 0.02 Lingua::Stem::Gl 1.02 Lingua::Stem::It 0.02 Lingua::Stem::No 1.01 Lingua::Stem::Pt 1.01 Lingua::Stem::Ru 0.04 Lingua::Stem::Snowball::Da 1.01 Lingua::Stem::Snowball::No 1.2 Lingua::Stem::Snowball::Se 1.2 Lingua::Stem::Sv 1.01 List::AllUtils 0.14 List::MoreUtils 0.428 List::MoreUtils::PP 0.428 List::MoreUtils::XS 0.428 List::SomeUtils 0.56 List::SomeUtils::PP 0.56 List::SomeUtils::XS 0.58 List::Util 1.50 List::Util::XS 1.50 List::UtilsBy 0.11 local::lib 2.000024 Locale::Maketext::Extract 1.00 Locale::Maketext::Extract::Plugin::Base 1.00 Locale::Maketext::Extract::Plugin::FormFu 1.00 Locale::Maketext::Extract::Plugin::Generic 1.00 Locale::Maketext::Extract::Plugin::Haml 1.00 Locale::Maketext::Extract::Plugin::Mason 1.00 Locale::Maketext::Extract::Plugin::Perl 1.00 Locale::Maketext::Extract::Plugin::PPI 1.00 Locale::Maketext::Extract::Plugin::TextTemplate 1.00 Locale::Maketext::Extract::Plugin::TT2 1.00 Locale::Maketext::Extract::Plugin::YAML 1.00 Locale::Maketext::Extract::Run 1.00 Locale::Maketext::Lexicon 1.00 Locale::Maketext::Lexicon::Auto 1.00 Locale::Maketext::Lexicon::Gettext 1.00 Locale::Maketext::Lexicon::Msgcat 1.00 Locale::Maketext::Lexicon::Tie 1.00 Log::Any 1.706 Log::Any::Adapter 1.706 Log::Any::Adapter::Base 1.706 Log::Any::Adapter::File 1.706 Log::Any::Adapter::Null 1.706 Log::Any::Adapter::Stderr 1.706 Log::Any::Adapter::Stdout 1.706 Log::Any::Adapter::Syslog 1.706 Log::Any::Adapter::Test 1.706 Log::Any::Adapter::Util 1.706 Log::Any::Manager 1.706 Log::Any::Proxy 1.706 Log::Any::Proxy::Null 1.706 Log::Any::Proxy::Test 1.706 Log::Any::Test 1.706 Log::Contextual 0.008001 Log::Contextual::Easy::Default 0.008001 Log::Contextual::Easy::Package 0.008001 Log::Contextual::Role::Router 0.008001 Log::Contextual::Role::Router::HasLogger 0.008001 
Log::Contextual::Role::Router::SetLogger 0.008001 Log::Contextual::Role::Router::WithLogger 0.008001 Log::Contextual::Router 0.008001 Log::Contextual::SimpleLogger 0.008001 Log::Contextual::TeeLogger 0.008001 Log::Contextual::WarnLogger 0.008001 Log::Dispatch 2.67 Log::Dispatch::ApacheLog 2.67 Log::Dispatch::Array 1.003 Log::Dispatch::Base 2.67 Log::Dispatch::Code 2.67 Log::Dispatch::Config 1.04 Log::Dispatch::Configurator 1.00 Log::Dispatch::Configurator::AppConfig 1.00 Log::Dispatch::Email 2.67 Log::Dispatch::Email::MailSend 2.67 Log::Dispatch::Email::MailSender 2.67 Log::Dispatch::Email::MailSendmail 2.67 Log::Dispatch::Email::MIMELite 2.67 Log::Dispatch::File 2.67 Log::Dispatch::File::Locked 2.67 Log::Dispatch::Handle 2.67 Log::Dispatch::Null 2.67 Log::Dispatch::Output 2.67 Log::Dispatch::Screen 2.67 Log::Dispatch::Syslog 2.67 Log::Dispatch::Types 2.67 Log::Dispatch::Vars 2.67 Log::Dispatchouli 2.016 Log::Dispatchouli::Global 2.016 Log::Dispatchouli::Proxy 2.016 Log::Log4perl 1.49 Log::Log4perl::Appender unknown Log::Log4perl::Appender::Buffer unknown Log::Log4perl::Appender::DBI unknown Log::Log4perl::Appender::File unknown Log::Log4perl::Appender::Limit unknown Log::Log4perl::Appender::RRDs unknown Log::Log4perl::Appender::Screen unknown Log::Log4perl::Appender::ScreenColoredLevels unknown Log::Log4perl::Appender::Socket unknown Log::Log4perl::Appender::String unknown Log::Log4perl::Appender::Synchronized unknown Log::Log4perl::Appender::TestArrayBuffer unknown Log::Log4perl::Appender::TestBuffer unknown Log::Log4perl::Appender::TestFileCreeper unknown Log::Log4perl::Catalyst unknown Log::Log4perl::Config unknown Log::Log4perl::Config::BaseConfigurator unknown Log::Log4perl::Config::DOMConfigurator 0.03 Log::Log4perl::Config::PropertyConfigurator unknown Log::Log4perl::Config::Watch unknown Log::Log4perl::DateFormat unknown Log::Log4perl::Filter unknown Log::Log4perl::Filter::Boolean unknown Log::Log4perl::Filter::LevelMatch unknown Log::Log4perl::Filter::LevelRange unknown Log::Log4perl::Filter::MDC unknown Log::Log4perl::Filter::StringMatch unknown Log::Log4perl::InternalDebug unknown Log::Log4perl::JavaMap unknown Log::Log4perl::JavaMap::ConsoleAppender unknown Log::Log4perl::JavaMap::FileAppender unknown Log::Log4perl::JavaMap::JDBCAppender unknown Log::Log4perl::JavaMap::NTEventLogAppender unknown Log::Log4perl::JavaMap::RollingFileAppender unknown Log::Log4perl::JavaMap::SyslogAppender unknown Log::Log4perl::JavaMap::TestBuffer unknown Log::Log4perl::Layout unknown Log::Log4perl::Layout::NoopLayout unknown Log::Log4perl::Layout::PatternLayout unknown Log::Log4perl::Layout::PatternLayout::Multiline unknown Log::Log4perl::Layout::SimpleLayout unknown Log::Log4perl::Level unknown Log::Log4perl::Logger unknown Log::Log4perl::MDC unknown Log::Log4perl::NDC unknown Log::Log4perl::Resurrector unknown Log::Log4perl::Util unknown Log::Log4perl::Util::Semaphore unknown Log::Log4perl::Util::TimeTracker unknown Log::Message 0.08 Log::Message::Config 0.08 Log::Message::Handlers 0.08 Log::Message::Item 0.08 Log::Message::Simple 0.10 Log::Report 1.27 Log::Report::DBIC::Profiler 1.27 Log::Report::Die 1.27 Log::Report::Dispatcher 1.27 Log::Report::Dispatcher::Callback 1.27 Log::Report::Dispatcher::File 1.27 Log::Report::Dispatcher::Log4perl 1.27 Log::Report::Dispatcher::LogDispatch 1.27 Log::Report::Dispatcher::Perl 1.27 Log::Report::Dispatcher::Syslog 1.27 Log::Report::Dispatcher::Try 1.27 Log::Report::Domain 1.27 Log::Report::Exception 1.27 Log::Report::Message 1.27 Log::Report::Minimal 1.06 
Log::Report::Minimal::Domain 1.06 Log::Report::Optional 1.06 Log::Report::Translator 1.27 Log::Report::Util 1.06 Logger::Simple 2.0 LWP 6.35 LWP::Authen::Basic 6.35 LWP::Authen::Digest 6.35 LWP::Authen::Ntlm 6.35 LWP::ConnCache 6.35 LWP::Debug 6.35 LWP::Debug unknown LWP::Debug::TraceHTTP 6.35 LWP::DebugFile 6.35 LWP::MediaTypes 6.02 LWP::MemberMixin 6.35 LWP::Protocol 6.35 LWP::Protocol::cpan 6.35 LWP::Protocol::data 6.35 LWP::Protocol::file 6.35 LWP::Protocol::ftp 6.35 LWP::Protocol::gopher 6.35 LWP::Protocol::http 6.35 LWP::Protocol::https 6.07 LWP::Protocol::loopback 6.35 LWP::Protocol::mailto 6.35 LWP::Protocol::nntp 6.35 LWP::Protocol::nogo 6.35 LWP::RobotUA 6.35 LWP::Simple 6.35 LWP::UserAgent 6.35 Mail::Address 2.20 Mail::Cap 2.20 Mail::Field 2.20 Mail::Field::AddrList 2.20 Mail::Field::Date 2.20 Mail::Field::Generic 2.20 Mail::Filter 2.20 Mail::Header 2.20 Mail::Internet 2.20 Mail::Mailer 2.20 Mail::Mailer::qmail 2.20 Mail::Mailer::rfc822 2.20 Mail::Mailer::sendmail 2.20 Mail::Mailer::smtp 2.20 Mail::Mailer::smtps 2.20 Mail::Mailer::testfile 2.20 Mail::Send 2.20 Mail::Util 2.20 MailTools 2.20 Math::Bezier 0.01 Math::BigFloat 1.999813 Math::BigInt 1.999813 Math::BigInt::Calc 1.999813 Math::BigInt::CalcEmu 1.999813 Math::BigInt::Lib 1.999813 Math::CDF 0.1 Math::Cephes 0.5305 Math::Cephes::Complex 0.5305 Math::Cephes::Fraction 0.5305 Math::Cephes::Matrix 0.5305 Math::Cephes::Polynomial 0.5305 Math::Combinatorics 0.09 Math::Counting 0.1305 Math::Derivative 1.01 Math::GSL::Linalg::SVD 0.0.2 Math::MatrixReal 2.13 Math::Prime::Util 0.70 Math::Prime::Util::ChaCha 0.70 Math::Prime::Util::ECAffinePoint 0.70 Math::Prime::Util::ECProjectivePoint 0.70 Math::Prime::Util::Entropy 0.70 Math::Prime::Util::GMP 0.50 Math::Prime::Util::MemFree 0.70 Math::Prime::Util::PP 0.70 Math::Prime::Util::PPFE unknown Math::Prime::Util::PrimalityProving 0.70 Math::Prime::Util::PrimeArray 0.70 Math::Prime::Util::PrimeIterator 0.70 Math::Prime::Util::RandomPrimes 0.70 Math::Prime::Util::ZetaBigFloat 0.70 Math::Random 0.72 Math::Random::ISAAC 1.004 Math::Random::ISAAC::PP 1.004 Math::Random::MT::Auto 6.22 Math::Random::MT::Auto::Range 6.22 Math::Round 0.07 Math::Spline 0.02 Math::Utils 1.12 Math::VecStat 0.08 Memoize::ExpireLRU 0.56 Menlo 1.9019 Menlo::Builder::Static unknown Menlo::CLI::Compat 1.9022 Menlo::Dependency unknown Menlo::Index::MetaCPAN unknown Menlo::Index::MetaDB 1.9019 Menlo::Index::Mirror unknown Menlo::Legacy 1.9022 Menlo::Util unknown Meta::Builder 0.003 Meta::Builder::Base unknown Meta::Builder::Util unknown metaclass 2.2011 Method::Generate::Accessor unknown Method::Generate::BuildAll unknown Method::Generate::Constructor unknown Method::Generate::DemolishAll unknown Method::Inliner unknown MIME::Charset 1.012.2 MIME::Charset::_Compat 1.003.1 MIME::Charset::UTF 1.010 MIME::Type 2.17 MIME::Types 2.17 Mixin::Linewise 0.108 Mixin::Linewise::Readers 0.108 Mixin::Linewise::Writers 0.108 Mock::Config 0.03 Modern::Perl 1.20180701 Module::AutoInstall 1.19 Module::Build 0.4224 Module::Build::Base 0.4224 Module::Build::Compat 0.4224 Module::Build::Config 0.4224 Module::Build::ConfigData unknown Module::Build::Cookbook 0.4224 Module::Build::Dumper 0.4224 Module::Build::Notes 0.4224 Module::Build::Platform::aix 0.4224 Module::Build::Platform::cygwin 0.4224 Module::Build::Platform::darwin 0.4224 Module::Build::Platform::Default 0.4224 Module::Build::Platform::MacOS 0.4224 Module::Build::Platform::os2 0.4224 Module::Build::Platform::Unix 0.4224 Module::Build::Platform::VMS 0.4224 Module::Build::Platform::VOS 
0.4224 Module::Build::Platform::Windows 0.4224 Module::Build::PodParser 0.4224 Module::Build::PPMMaker 0.4224 Module::Build::Tiny 0.039 Module::Build::XSUtil 0.19 Module::Compile 0.37 Module::Compile::Opt unknown Module::CPANfile 1.1004 Module::CPANfile::Environment unknown Module::CPANfile::Prereq unknown Module::CPANfile::Prereqs unknown Module::CPANfile::Requirement unknown Module::Faker 0.020 Module::Faker::Appendix 0.020 Module::Faker::Dist 0.020 Module::Faker::File 0.020 Module::Faker::Heavy 0.020 Module::Faker::Module 0.020 Module::Faker::Package 0.020 Module::Find 0.13 Module::Implementation 0.09 Module::Install 1.19 Module::Install::Admin 1.19 Module::Install::Admin::Bundle 1.19 Module::Install::Admin::Compiler 1.19 Module::Install::Admin::Find 1.19 Module::Install::Admin::Include 1.19 Module::Install::Admin::Makefile 1.19 Module::Install::Admin::Manifest 1.19 Module::Install::Admin::Metadata 1.19 Module::Install::Admin::ScanDeps 1.19 Module::Install::Admin::WriteAll 1.19 Module::Install::AutoInstall 1.19 Module::Install::Base 1.19 Module::Install::Bundle 1.19 Module::Install::Can 1.19 Module::Install::Catalyst unknown Module::Install::Compiler 1.19 Module::Install::Deprecated 1.19 Module::Install::DSL 1.19 Module::Install::External 1.19 Module::Install::Fetch 1.19 Module::Install::Include 1.19 Module::Install::Inline 1.19 Module::Install::Makefile 1.19 Module::Install::MakeMaker 1.19 Module::Install::Metadata 1.19 Module::Install::PAR 1.19 Module::Install::Run 1.19 Module::Install::Scripts 1.19 Module::Install::Share 1.19 Module::Install::Win32 1.19 Module::Install::With 1.19 Module::Install::WriteAll 1.19 Module::Optimize unknown Module::Path 0.19 Module::Pluggable 5.2 Module::Pluggable::Object 5.2 Module::Runtime 0.016 Module::Runtime::Conflicts 0.003 Module::ScanDeps 1.24 Module::ScanDeps::Cache unknown Module::Util 1.09 Mojo unknown Mojo::Asset unknown Mojo::Asset::File unknown Mojo::Asset::Memory unknown Mojo::Base unknown Mojo::ByteStream unknown Mojo::Cache unknown Mojo::Collection unknown Mojo::Content unknown Mojo::Content::MultiPart unknown Mojo::Content::Single unknown Mojo::Cookie unknown Mojo::Cookie::Request unknown Mojo::Cookie::Response unknown Mojo::Date unknown Mojo::DOM unknown Mojo::DOM::CSS unknown Mojo::DOM::HTML unknown Mojo::EventEmitter unknown Mojo::Exception unknown Mojo::File unknown Mojo::Headers unknown Mojo::HelloWorld unknown Mojo::Home unknown Mojo::IOLoop unknown Mojo::IOLoop::Client unknown Mojo::IOLoop::Delay unknown Mojo::IOLoop::Server unknown Mojo::IOLoop::Stream unknown Mojo::IOLoop::Stream::HTTPClient unknown Mojo::IOLoop::Stream::HTTPServer unknown Mojo::IOLoop::Stream::WebSocketClient unknown Mojo::IOLoop::Stream::WebSocketServer unknown Mojo::IOLoop::Subprocess unknown Mojo::IOLoop::TLS unknown Mojo::JSON unknown Mojo::JSON::Pointer unknown Mojo::Loader unknown Mojo::Log unknown Mojo::Message unknown Mojo::Message::Request unknown Mojo::Message::Response unknown Mojo::Parameters unknown Mojo::Path unknown Mojo::Promise unknown Mojo::Reactor unknown Mojo::Reactor::EV unknown Mojo::Reactor::Poll unknown Mojo::Server unknown Mojo::Server::CGI unknown Mojo::Server::Daemon unknown Mojo::Server::Hypnotoad unknown Mojo::Server::Morbo unknown Mojo::Server::Morbo::Backend unknown Mojo::Server::Morbo::Backend::Poll unknown Mojo::Server::Prefork unknown Mojo::Server::PSGI unknown Mojo::Template unknown Mojo::Transaction unknown Mojo::Transaction::HTTP unknown Mojo::Transaction::WebSocket unknown Mojo::Upload unknown Mojo::URL unknown 
Mojo::UserAgent unknown Mojo::UserAgent::CookieJar unknown Mojo::UserAgent::Proxy unknown Mojo::UserAgent::Server unknown Mojo::UserAgent::Transactor unknown Mojo::Util unknown Mojo::WebSocket unknown Mojolicious 7.88 Mojolicious::Command unknown Mojolicious::Command::cgi unknown Mojolicious::Command::cpanify unknown Mojolicious::Command::daemon unknown Mojolicious::Command::eval unknown Mojolicious::Command::generate unknown Mojolicious::Command::generate::app unknown Mojolicious::Command::generate::lite_app unknown Mojolicious::Command::generate::makefile unknown Mojolicious::Command::generate::plugin 0.01 Mojolicious::Command::get unknown Mojolicious::Command::inflate unknown Mojolicious::Command::prefork unknown Mojolicious::Command::psgi unknown Mojolicious::Command::routes unknown Mojolicious::Command::test unknown Mojolicious::Command::version unknown Mojolicious::Commands unknown Mojolicious::Controller unknown Mojolicious::Lite unknown Mojolicious::Plugin unknown Mojolicious::Plugin::Config unknown Mojolicious::Plugin::DefaultHelpers unknown Mojolicious::Plugin::EPLRenderer unknown Mojolicious::Plugin::EPRenderer unknown Mojolicious::Plugin::HeaderCondition unknown Mojolicious::Plugin::JSONConfig unknown Mojolicious::Plugin::Mount unknown Mojolicious::Plugin::PODRenderer unknown Mojolicious::Plugin::TagHelpers unknown Mojolicious::Plugins unknown Mojolicious::Renderer unknown Mojolicious::Routes unknown Mojolicious::Routes::Match unknown Mojolicious::Routes::Pattern unknown Mojolicious::Routes::Route unknown Mojolicious::Sessions unknown Mojolicious::Static unknown Mojolicious::Types unknown Mojolicious::Validator unknown Mojolicious::Validator::Validation unknown MojoX::Log::Report 1.27 MojoX::MIME::Types 2.17 Moo 2.001001 Moo::_mro unknown Moo::_strictures unknown Moo::_Utils unknown Moo::HandleMoose unknown Moo::HandleMoose::_TypeMap unknown Moo::HandleMoose::FakeMetaClass unknown Moo::Object unknown Moo::Role 2.001001 Moo::sification unknown Moose 2.2011 Moose::Autobox 0.16 Moose::Autobox::Array 0.16 Moose::Autobox::Code 0.16 Moose::Autobox::Defined 0.16 Moose::Autobox::Hash 0.16 Moose::Autobox::Indexed 0.16 Moose::Autobox::Item 0.16 Moose::Autobox::List 0.16 Moose::Autobox::Number 0.16 Moose::Autobox::Ref 0.16 Moose::Autobox::Scalar 0.16 Moose::Autobox::String 0.16 Moose::Autobox::Undef 0.16 Moose::Autobox::Value 0.16 Moose::Deprecated 2.2011 Moose::Exception 2.2011 Moose::Exception::AccessorMustReadWrite 2.2011 Moose::Exception::AddParameterizableTypeTakesParameterizableType 2.2011 Moose::Exception::AddRoleTakesAMooseMetaRoleInstance 2.2011 Moose::Exception::AddRoleToARoleTakesAMooseMetaRole 2.2011 Moose::Exception::ApplyTakesABlessedInstance 2.2011 Moose::Exception::AttachToClassNeedsAClassMOPClassInstanceOrASubclass 2.2011 Moose::Exception::AttributeConflictInRoles 2.2011 Moose::Exception::AttributeConflictInSummation 2.2011 Moose::Exception::AttributeExtensionIsNotSupportedInRoles 2.2011 Moose::Exception::AttributeIsRequired 2.2011 Moose::Exception::AttributeMustBeAnClassMOPMixinAttributeCoreOrSubclass 2.2011 Moose::Exception::AttributeNamesDoNotMatch 2.2011 Moose::Exception::AttributeValueIsNotAnObject 2.2011 Moose::Exception::AttributeValueIsNotDefined 2.2011 Moose::Exception::AutoDeRefNeedsArrayRefOrHashRef 2.2011 Moose::Exception::BadOptionFormat 2.2011 Moose::Exception::BothBuilderAndDefaultAreNotAllowed 2.2011 Moose::Exception::BuilderDoesNotExist 2.2011 Moose::Exception::BuilderMethodNotSupportedForAttribute 2.2011 
Moose::Exception::BuilderMethodNotSupportedForInlineAttribute 2.2011 Moose::Exception::BuilderMustBeAMethodName 2.2011 Moose::Exception::CallingMethodOnAnImmutableInstance 2.2011 Moose::Exception::CallingReadOnlyMethodOnAnImmutableInstance 2.2011 Moose::Exception::CanExtendOnlyClasses 2.2011 Moose::Exception::CannotAddAdditionalTypeCoercionsToUnion 2.2011 Moose::Exception::CannotAddAsAnAttributeToARole 2.2011 Moose::Exception::CannotApplyBaseClassRolesToRole 2.2011 Moose::Exception::CannotAssignValueToReadOnlyAccessor 2.2011 Moose::Exception::CannotAugmentIfLocalMethodPresent 2.2011 Moose::Exception::CannotAugmentNoSuperMethod 2.2011 Moose::Exception::CannotAutoDereferenceTypeConstraint 2.2011 Moose::Exception::CannotAutoDerefWithoutIsa 2.2011 Moose::Exception::CannotCalculateNativeType 2.2011 Moose::Exception::CannotCallAnAbstractBaseMethod 2.2011 Moose::Exception::CannotCallAnAbstractMethod 2.2011 Moose::Exception::CannotCoerceAttributeWhichHasNoCoercion 2.2011 Moose::Exception::CannotCoerceAWeakRef 2.2011 Moose::Exception::CannotCreateHigherOrderTypeWithoutATypeParameter 2.2011 Moose::Exception::CannotCreateMethodAliasLocalMethodIsPresent 2.2011 Moose::Exception::CannotCreateMethodAliasLocalMethodIsPresentInClass 2.2011 Moose::Exception::CannotDelegateLocalMethodIsPresent 2.2011 Moose::Exception::CannotDelegateWithoutIsa 2.2011 Moose::Exception::CannotFindDelegateMetaclass 2.2011 Moose::Exception::CannotFindType 2.2011 Moose::Exception::CannotFindTypeGivenToMatchOnType 2.2011 Moose::Exception::CannotFixMetaclassCompatibility 2.2011 Moose::Exception::CannotGenerateInlineConstraint 2.2011 Moose::Exception::CannotInitializeMooseMetaRoleComposite 2.2011 Moose::Exception::CannotInlineTypeConstraintCheck 2.2011 Moose::Exception::CannotLocatePackageInINC 2.2011 Moose::Exception::CannotMakeMetaclassCompatible 2.2011 Moose::Exception::CannotOverrideALocalMethod 2.2011 Moose::Exception::CannotOverrideBodyOfMetaMethods 2.2011 Moose::Exception::CannotOverrideLocalMethodIsPresent 2.2011 Moose::Exception::CannotOverrideNoSuperMethod 2.2011 Moose::Exception::CannotRegisterUnnamedTypeConstraint 2.2011 Moose::Exception::CannotUseLazyBuildAndDefaultSimultaneously 2.2011 Moose::Exception::CanOnlyConsumeRole 2.2011 Moose::Exception::CanOnlyWrapBlessedCode 2.2011 Moose::Exception::CanReblessOnlyIntoASubclass 2.2011 Moose::Exception::CanReblessOnlyIntoASuperclass 2.2011 Moose::Exception::CircularReferenceInAlso 2.2011 Moose::Exception::ClassDoesNotHaveInitMeta 2.2011 Moose::Exception::ClassDoesTheExcludedRole 2.2011 Moose::Exception::ClassNamesDoNotMatch 2.2011 Moose::Exception::CloneObjectExpectsAnInstanceOfMetaclass 2.2011 Moose::Exception::CodeBlockMustBeACodeRef 2.2011 Moose::Exception::CoercingWithoutCoercions 2.2011 Moose::Exception::CoercionAlreadyExists 2.2011 Moose::Exception::CoercionNeedsTypeConstraint 2.2011 Moose::Exception::ConflictDetectedInCheckRoleExclusions 2.2011 Moose::Exception::ConflictDetectedInCheckRoleExclusionsInToClass 2.2011 Moose::Exception::ConstructClassInstanceTakesPackageName 2.2011 Moose::Exception::CouldNotCreateMethod 2.2011 Moose::Exception::CouldNotCreateWriter 2.2011 Moose::Exception::CouldNotEvalConstructor 2.2011 Moose::Exception::CouldNotEvalDestructor 2.2011 Moose::Exception::CouldNotFindTypeConstraintToCoerceFrom 2.2011 Moose::Exception::CouldNotGenerateInlineAttributeMethod 2.2011 Moose::Exception::CouldNotLocateTypeConstraintForUnion 2.2011 Moose::Exception::CouldNotParseType 2.2011 Moose::Exception::CreateMOPClassTakesArrayRefOfAttributes 2.2011 
Moose::Exception::CreateMOPClassTakesArrayRefOfSuperclasses 2.2011 Moose::Exception::CreateMOPClassTakesHashRefOfMethods 2.2011 Moose::Exception::CreateTakesArrayRefOfRoles 2.2011 Moose::Exception::CreateTakesHashRefOfAttributes 2.2011 Moose::Exception::CreateTakesHashRefOfMethods 2.2011 Moose::Exception::DefaultToMatchOnTypeMustBeCodeRef 2.2011 Moose::Exception::DelegationToAClassWhichIsNotLoaded 2.2011 Moose::Exception::DelegationToARoleWhichIsNotLoaded 2.2011 Moose::Exception::DelegationToATypeWhichIsNotAClass 2.2011 Moose::Exception::DoesRequiresRoleName 2.2011 Moose::Exception::EnumCalledWithAnArrayRefAndAdditionalArgs 2.2011 Moose::Exception::EnumValuesMustBeString 2.2011 Moose::Exception::ExtendsMissingArgs 2.2011 Moose::Exception::HandlesMustBeAHashRef 2.2011 Moose::Exception::IllegalInheritedOptions 2.2011 Moose::Exception::IllegalMethodTypeToAddMethodModifier 2.2011 Moose::Exception::IncompatibleMetaclassOfSuperclass 2.2011 Moose::Exception::InitializeTakesUnBlessedPackageName 2.2011 Moose::Exception::InitMetaRequiresClass 2.2011 Moose::Exception::InstanceBlessedIntoWrongClass 2.2011 Moose::Exception::InstanceMustBeABlessedReference 2.2011 Moose::Exception::InvalidArgPassedToMooseUtilMetaRole 2.2011 Moose::Exception::InvalidArgumentsToTraitAliases 2.2011 Moose::Exception::InvalidArgumentToMethod 2.2011 Moose::Exception::InvalidBaseTypeGivenToCreateParameterizedTypeConstraint 2.2011 Moose::Exception::InvalidHandleValue 2.2011 Moose::Exception::InvalidHasProvidedInARole 2.2011 Moose::Exception::InvalidNameForType 2.2011 Moose::Exception::InvalidOverloadOperator 2.2011 Moose::Exception::InvalidRoleApplication 2.2011 Moose::Exception::InvalidTypeConstraint 2.2011 Moose::Exception::InvalidTypeGivenToCreateParameterizedTypeConstraint 2.2011 Moose::Exception::InvalidValueForIs 2.2011 Moose::Exception::IsaDoesNotDoTheRole 2.2011 Moose::Exception::IsaLacksDoesMethod 2.2011 Moose::Exception::LazyAttributeNeedsADefault 2.2011 Moose::Exception::Legacy 2.2011 Moose::Exception::MatchActionMustBeACodeRef 2.2011 Moose::Exception::MessageParameterMustBeCodeRef 2.2011 Moose::Exception::MetaclassIsAClassNotASubclassOfGivenMetaclass 2.2011 Moose::Exception::MetaclassIsARoleNotASubclassOfGivenMetaclass 2.2011 Moose::Exception::MetaclassIsNotASubclassOfGivenMetaclass 2.2011 Moose::Exception::MetaclassMustBeASubclassOfMooseMetaClass 2.2011 Moose::Exception::MetaclassMustBeASubclassOfMooseMetaRole 2.2011 Moose::Exception::MetaclassMustBeDerivedFromClassMOPClass 2.2011 Moose::Exception::MetaclassNotLoaded 2.2011 Moose::Exception::MetaclassTypeIncompatible 2.2011 Moose::Exception::MethodExpectedAMetaclassObject 2.2011 Moose::Exception::MethodExpectsFewerArgs 2.2011 Moose::Exception::MethodExpectsMoreArgs 2.2011 Moose::Exception::MethodModifierNeedsMethodName 2.2011 Moose::Exception::MethodNameConflictInRoles 2.2011 Moose::Exception::MethodNameNotFoundInInheritanceHierarchy 2.2011 Moose::Exception::MethodNameNotGiven 2.2011 Moose::Exception::MOPAttributeNewNeedsAttributeName 2.2011 Moose::Exception::MustDefineAMethodName 2.2011 Moose::Exception::MustDefineAnAttributeName 2.2011 Moose::Exception::MustDefineAnOverloadOperator 2.2011 Moose::Exception::MustHaveAtLeastOneValueToEnumerate 2.2011 Moose::Exception::MustPassAHashOfOptions 2.2011 Moose::Exception::MustPassAMooseMetaRoleInstanceOrSubclass 2.2011 Moose::Exception::MustPassAPackageNameOrAnExistingClassMOPPackageInstance 2.2011 Moose::Exception::MustPassEvenNumberOfArguments 2.2011 Moose::Exception::MustPassEvenNumberOfAttributeOptions 2.2011 
Moose::Exception::MustProvideANameForTheAttribute 2.2011 Moose::Exception::MustSpecifyAtleastOneMethod 2.2011 Moose::Exception::MustSpecifyAtleastOneRole 2.2011 Moose::Exception::MustSpecifyAtleastOneRoleToApplicant 2.2011 Moose::Exception::MustSupplyAClassMOPAttributeInstance 2.2011 Moose::Exception::MustSupplyADelegateToMethod 2.2011 Moose::Exception::MustSupplyAMetaclass 2.2011 Moose::Exception::MustSupplyAMooseMetaAttributeInstance 2.2011 Moose::Exception::MustSupplyAnAccessorTypeToConstructWith 2.2011 Moose::Exception::MustSupplyAnAttributeToConstructWith 2.2011 Moose::Exception::MustSupplyArrayRefAsCurriedArguments 2.2011 Moose::Exception::MustSupplyPackageNameAndName 2.2011 Moose::Exception::NeedsTypeConstraintUnionForTypeCoercionUnion 2.2011 Moose::Exception::NeitherAttributeNorAttributeNameIsGiven 2.2011 Moose::Exception::NeitherClassNorClassNameIsGiven 2.2011 Moose::Exception::NeitherRoleNorRoleNameIsGiven 2.2011 Moose::Exception::NeitherTypeNorTypeNameIsGiven 2.2011 Moose::Exception::NoAttributeFoundInSuperClass 2.2011 Moose::Exception::NoBodyToInitializeInAnAbstractBaseClass 2.2011 Moose::Exception::NoCasesMatched 2.2011 Moose::Exception::NoConstraintCheckForTypeConstraint 2.2011 Moose::Exception::NoDestructorClassSpecified 2.2011 Moose::Exception::NoImmutableTraitSpecifiedForClass 2.2011 Moose::Exception::NoParentGivenToSubtype 2.2011 Moose::Exception::OnlyInstancesCanBeCloned 2.2011 Moose::Exception::OperatorIsRequired 2.2011 Moose::Exception::OverloadConflictInSummation 2.2011 Moose::Exception::OverloadRequiresAMetaClass 2.2011 Moose::Exception::OverloadRequiresAMetaMethod 2.2011 Moose::Exception::OverloadRequiresAMetaOverload 2.2011 Moose::Exception::OverloadRequiresAMethodNameOrCoderef 2.2011 Moose::Exception::OverloadRequiresAnOperator 2.2011 Moose::Exception::OverloadRequiresNamesForCoderef 2.2011 Moose::Exception::OverrideConflictInComposition 2.2011 Moose::Exception::OverrideConflictInSummation 2.2011 Moose::Exception::PackageDoesNotUseMooseExporter 2.2011 Moose::Exception::PackageNameAndNameParamsNotGivenToWrap 2.2011 Moose::Exception::PackagesAndModulesAreNotCachable 2.2011 Moose::Exception::ParameterIsNotSubtypeOfParent 2.2011 Moose::Exception::ReferencesAreNotAllowedAsDefault 2.2011 Moose::Exception::RequiredAttributeLacksInitialization 2.2011 Moose::Exception::RequiredAttributeNeedsADefault 2.2011 Moose::Exception::RequiredMethodsImportedByClass 2.2011 Moose::Exception::RequiredMethodsNotImplementedByClass 2.2011 Moose::Exception::Role::Attribute 2.2011 Moose::Exception::Role::AttributeName 2.2011 Moose::Exception::Role::Class 2.2011 Moose::Exception::Role::EitherAttributeOrAttributeName 2.2011 Moose::Exception::Role::Instance 2.2011 Moose::Exception::Role::InstanceClass 2.2011 Moose::Exception::Role::InvalidAttributeOptions 2.2011 Moose::Exception::Role::Method 2.2011 Moose::Exception::Role::ParamsHash 2.2011 Moose::Exception::Role::Role 2.2011 Moose::Exception::Role::RoleForCreate 2.2011 Moose::Exception::Role::RoleForCreateMOPClass 2.2011 Moose::Exception::Role::TypeConstraint 2.2011 Moose::Exception::RoleDoesTheExcludedRole 2.2011 Moose::Exception::RoleExclusionConflict 2.2011 Moose::Exception::RoleNameRequired 2.2011 Moose::Exception::RoleNameRequiredForMooseMetaRole 2.2011 Moose::Exception::RolesDoNotSupportAugment 2.2011 Moose::Exception::RolesDoNotSupportExtends 2.2011 Moose::Exception::RolesDoNotSupportInner 2.2011 Moose::Exception::RolesDoNotSupportRegexReferencesForMethodModifiers 2.2011 Moose::Exception::RolesInCreateTakesAnArrayRef 2.2011 
Moose::Exception::RolesListMustBeInstancesOfMooseMetaRole 2.2011 Moose::Exception::SingleParamsToNewMustBeHashRef 2.2011 Moose::Exception::TriggerMustBeACodeRef 2.2011 Moose::Exception::TypeConstraintCannotBeUsedForAParameterizableType 2.2011 Moose::Exception::TypeConstraintIsAlreadyCreated 2.2011 Moose::Exception::TypeParameterMustBeMooseMetaType 2.2011 Moose::Exception::UnableToCanonicalizeHandles 2.2011 Moose::Exception::UnableToCanonicalizeNonRolePackage 2.2011 Moose::Exception::UnableToRecognizeDelegateMetaclass 2.2011 Moose::Exception::UndefinedHashKeysPassedToMethod 2.2011 Moose::Exception::UnionCalledWithAnArrayRefAndAdditionalArgs 2.2011 Moose::Exception::UnionTakesAtleastTwoTypeNames 2.2011 Moose::Exception::ValidationFailedForInlineTypeConstraint 2.2011 Moose::Exception::ValidationFailedForTypeConstraint 2.2011 Moose::Exception::WrapTakesACodeRefToBless 2.2011 Moose::Exception::WrongTypeConstraintGiven 2.2011 Moose::Exporter 2.2011 Moose::Meta::Attribute 2.2011 Moose::Meta::Attribute::Native 2.2011 Moose::Meta::Attribute::Native::Trait 2.2011 Moose::Meta::Attribute::Native::Trait::Array 2.2011 Moose::Meta::Attribute::Native::Trait::Bool 2.2011 Moose::Meta::Attribute::Native::Trait::Code 2.2011 Moose::Meta::Attribute::Native::Trait::Counter 2.2011 Moose::Meta::Attribute::Native::Trait::Hash 2.2011 Moose::Meta::Attribute::Native::Trait::Number 2.2011 Moose::Meta::Attribute::Native::Trait::String 2.2011 Moose::Meta::Class 2.2011 Moose::Meta::Class::Immutable::Trait 2.2011 Moose::Meta::Instance 2.2011 Moose::Meta::Method 2.2011 Moose::Meta::Method::Accessor 2.2011 Moose::Meta::Method::Accessor::Native 2.2011 Moose::Meta::Method::Accessor::Native::Array 2.2011 Moose::Meta::Method::Accessor::Native::Array::accessor 2.2011 Moose::Meta::Method::Accessor::Native::Array::clear 2.2011 Moose::Meta::Method::Accessor::Native::Array::count 2.2011 Moose::Meta::Method::Accessor::Native::Array::delete 2.2011 Moose::Meta::Method::Accessor::Native::Array::elements 2.2011 Moose::Meta::Method::Accessor::Native::Array::first 2.2011 Moose::Meta::Method::Accessor::Native::Array::first_index 2.2011 Moose::Meta::Method::Accessor::Native::Array::get 2.2011 Moose::Meta::Method::Accessor::Native::Array::grep 2.2011 Moose::Meta::Method::Accessor::Native::Array::insert 2.2011 Moose::Meta::Method::Accessor::Native::Array::is_empty 2.2011 Moose::Meta::Method::Accessor::Native::Array::join 2.2011 Moose::Meta::Method::Accessor::Native::Array::map 2.2011 Moose::Meta::Method::Accessor::Native::Array::natatime 2.2011 Moose::Meta::Method::Accessor::Native::Array::pop 2.2011 Moose::Meta::Method::Accessor::Native::Array::push 2.2011 Moose::Meta::Method::Accessor::Native::Array::reduce 2.2011 Moose::Meta::Method::Accessor::Native::Array::set 2.2011 Moose::Meta::Method::Accessor::Native::Array::shallow_clone 2.2011 Moose::Meta::Method::Accessor::Native::Array::shift 2.2011 Moose::Meta::Method::Accessor::Native::Array::shuffle 2.2011 Moose::Meta::Method::Accessor::Native::Array::sort 2.2011 Moose::Meta::Method::Accessor::Native::Array::sort_in_place 2.2011 Moose::Meta::Method::Accessor::Native::Array::splice 2.2011 Moose::Meta::Method::Accessor::Native::Array::uniq 2.2011 Moose::Meta::Method::Accessor::Native::Array::unshift 2.2011 Moose::Meta::Method::Accessor::Native::Array::Writer 2.2011 Moose::Meta::Method::Accessor::Native::Bool::not 2.2011 Moose::Meta::Method::Accessor::Native::Bool::set 2.2011 Moose::Meta::Method::Accessor::Native::Bool::toggle 2.2011 Moose::Meta::Method::Accessor::Native::Bool::unset 2.2011 
Moose::Meta::Method::Accessor::Native::Code::execute 2.2011 Moose::Meta::Method::Accessor::Native::Code::execute_method 2.2011 Moose::Meta::Method::Accessor::Native::Collection 2.2011 Moose::Meta::Method::Accessor::Native::Counter::dec 2.2011 Moose::Meta::Method::Accessor::Native::Counter::inc 2.2011 Moose::Meta::Method::Accessor::Native::Counter::reset 2.2011 Moose::Meta::Method::Accessor::Native::Counter::set 2.2011 Moose::Meta::Method::Accessor::Native::Counter::Writer 2.2011 Moose::Meta::Method::Accessor::Native::Hash 2.2011 Moose::Meta::Method::Accessor::Native::Hash::accessor 2.2011 Moose::Meta::Method::Accessor::Native::Hash::clear 2.2011 Moose::Meta::Method::Accessor::Native::Hash::count 2.2011 Moose::Meta::Method::Accessor::Native::Hash::defined 2.2011 Moose::Meta::Method::Accessor::Native::Hash::delete 2.2011 Moose::Meta::Method::Accessor::Native::Hash::elements 2.2011 Moose::Meta::Method::Accessor::Native::Hash::exists 2.2011 Moose::Meta::Method::Accessor::Native::Hash::get 2.2011 Moose::Meta::Method::Accessor::Native::Hash::is_empty 2.2011 Moose::Meta::Method::Accessor::Native::Hash::keys 2.2011 Moose::Meta::Method::Accessor::Native::Hash::kv 2.2011 Moose::Meta::Method::Accessor::Native::Hash::set 2.2011 Moose::Meta::Method::Accessor::Native::Hash::shallow_clone 2.2011 Moose::Meta::Method::Accessor::Native::Hash::values 2.2011 Moose::Meta::Method::Accessor::Native::Hash::Writer 2.2011 Moose::Meta::Method::Accessor::Native::Number::abs 2.2011 Moose::Meta::Method::Accessor::Native::Number::add 2.2011 Moose::Meta::Method::Accessor::Native::Number::div 2.2011 Moose::Meta::Method::Accessor::Native::Number::mod 2.2011 Moose::Meta::Method::Accessor::Native::Number::mul 2.2011 Moose::Meta::Method::Accessor::Native::Number::set 2.2011 Moose::Meta::Method::Accessor::Native::Number::sub 2.2011 Moose::Meta::Method::Accessor::Native::Reader 2.2011 Moose::Meta::Method::Accessor::Native::String::append 2.2011 Moose::Meta::Method::Accessor::Native::String::chomp 2.2011 Moose::Meta::Method::Accessor::Native::String::chop 2.2011 Moose::Meta::Method::Accessor::Native::String::clear 2.2011 Moose::Meta::Method::Accessor::Native::String::inc 2.2011 Moose::Meta::Method::Accessor::Native::String::length 2.2011 Moose::Meta::Method::Accessor::Native::String::match 2.2011 Moose::Meta::Method::Accessor::Native::String::prepend 2.2011 Moose::Meta::Method::Accessor::Native::String::replace 2.2011 Moose::Meta::Method::Accessor::Native::String::substr 2.2011 Moose::Meta::Method::Accessor::Native::Writer 2.2011 Moose::Meta::Method::Augmented 2.2011 Moose::Meta::Method::Constructor 2.2011 Moose::Meta::Method::Delegation 2.2011 Moose::Meta::Method::Destructor 2.2011 Moose::Meta::Method::Meta 2.2011 Moose::Meta::Method::Overridden 2.2011 Moose::Meta::Mixin::AttributeCore 2.2011 Moose::Meta::Object::Trait 2.2011 Moose::Meta::Role 2.2011 Moose::Meta::Role::Application 2.2011 Moose::Meta::Role::Application::RoleSummation 2.2011 Moose::Meta::Role::Application::ToClass 2.2011 Moose::Meta::Role::Application::ToInstance 2.2011 Moose::Meta::Role::Application::ToRole 2.2011 Moose::Meta::Role::Attribute 2.2011 Moose::Meta::Role::Composite 2.2011 Moose::Meta::Role::Method 2.2011 Moose::Meta::Role::Method::Conflicting 2.2011 Moose::Meta::Role::Method::Required 2.2011 Moose::Meta::TypeCoercion 2.2011 Moose::Meta::TypeCoercion::Union 2.2011 Moose::Meta::TypeConstraint 2.2011 Moose::Meta::TypeConstraint::Class 2.2011 Moose::Meta::TypeConstraint::DuckType 2.2011 Moose::Meta::TypeConstraint::Enum 2.2011 
Moose::Meta::TypeConstraint::Parameterizable 2.2011 Moose::Meta::TypeConstraint::Parameterized 2.2011 Moose::Meta::TypeConstraint::Registry 2.2011 Moose::Meta::TypeConstraint::Role 2.2011 Moose::Meta::TypeConstraint::Union 2.2011 Moose::Object 2.2011 Moose::Role 2.2011 Moose::Util 2.2011 Moose::Util::MetaRole 2.2011 Moose::Util::TypeConstraints 2.2011 Moose::Util::TypeConstraints::Builtins 2.2011 MooseX::Adopt::Class::Accessor::Fast 0.009032 MooseX::Aliases 0.11 MooseX::Aliases::Meta::Trait::Attribute 0.11 MooseX::Aliases::Meta::Trait::Class 0.11 MooseX::Aliases::Meta::Trait::Method 0.11 MooseX::Aliases::Meta::Trait::Role 0.11 MooseX::Aliases::Meta::Trait::Role::ApplicationToClass 0.11 MooseX::Aliases::Meta::Trait::Role::ApplicationToRole 0.11 MooseX::Aliases::Meta::Trait::Role::Composite 0.11 MooseX::App::Cmd 0.32 MooseX::App::Cmd::Command 0.32 MooseX::ArrayRef 0.005 MooseX::ArrayRef::Meta::Class 0.005 MooseX::ArrayRef::Meta::Instance 0.005 MooseX::ClassAttribute 0.29 MooseX::ClassAttribute::Meta::Role::Attribute 0.29 MooseX::ClassAttribute::Trait::Application 0.29 MooseX::ClassAttribute::Trait::Application::ToClass 0.29 MooseX::ClassAttribute::Trait::Application::ToRole 0.29 MooseX::ClassAttribute::Trait::Attribute 0.29 MooseX::ClassAttribute::Trait::Class 0.29 MooseX::ClassAttribute::Trait::Mixin::HasClassAttributes 0.29 MooseX::ClassAttribute::Trait::Role 0.29 MooseX::ClassAttribute::Trait::Role::Composite 0.29 MooseX::Clone 0.06 MooseX::Clone::Meta::Attribute::Trait::Clone 0.06 MooseX::Clone::Meta::Attribute::Trait::Clone::Base 0.06 MooseX::Clone::Meta::Attribute::Trait::Clone::Std 0.06 MooseX::Clone::Meta::Attribute::Trait::Copy 0.06 MooseX::Clone::Meta::Attribute::Trait::NoClone 0.06 MooseX::Clone::Meta::Attribute::Trait::StorableClone 0.06 MooseX::ConfigFromFile 0.14 MooseX::Configuration 0.02 MooseX::Configuration::Trait::Attribute 0.02 MooseX::Configuration::Trait::Attribute::ConfigKey 0.02 MooseX::Configuration::Trait::Object 0.02 MooseX::Daemonize 0.21 MooseX::Daemonize::Core 0.21 MooseX::Daemonize::Pid 0.21 MooseX::Daemonize::Pid::File 0.21 MooseX::Daemonize::WithPidFile 0.21 MooseX::Declare 0.43 MooseX::Declare::Context 0.43 MooseX::Declare::Context::Namespaced 0.43 MooseX::Declare::Context::Parameterized 0.43 MooseX::Declare::Syntax::EmptyBlockIfMissing 0.43 MooseX::Declare::Syntax::Extending 0.43 MooseX::Declare::Syntax::InnerSyntaxHandling 0.43 MooseX::Declare::Syntax::Keyword::Class 0.43 MooseX::Declare::Syntax::Keyword::Clean 0.43 MooseX::Declare::Syntax::Keyword::Method 0.43 MooseX::Declare::Syntax::Keyword::MethodModifier 0.43 MooseX::Declare::Syntax::Keyword::Namespace 0.43 MooseX::Declare::Syntax::Keyword::Role 0.43 MooseX::Declare::Syntax::Keyword::With 0.43 MooseX::Declare::Syntax::KeywordHandling 0.43 MooseX::Declare::Syntax::MethodDeclaration 0.43 MooseX::Declare::Syntax::MooseSetup 0.43 MooseX::Declare::Syntax::NamespaceHandling 0.43 MooseX::Declare::Syntax::OptionHandling 0.43 MooseX::Declare::Syntax::RoleApplication 0.43 MooseX::Declare::Util 0.43 MooseX::Emulate::Class::Accessor::Fast 0.009032 MooseX::Emulate::Class::Accessor::Fast::Meta::Accessor unknown MooseX::Emulate::Class::Accessor::Fast::Meta::Role::Attribute unknown MooseX::Getopt 0.71 MooseX::Getopt::Basic 0.71 MooseX::Getopt::Dashes 0.71 MooseX::Getopt::GLD 0.71 MooseX::Getopt::Meta::Attribute 0.71 MooseX::Getopt::Meta::Attribute::NoGetopt 0.71 MooseX::Getopt::Meta::Attribute::Trait 0.71 MooseX::Getopt::Meta::Attribute::Trait::NoGetopt 0.71 MooseX::Getopt::OptionTypeMap 0.71 
MooseX::Getopt::ProcessedArgv 0.71 MooseX::Getopt::Strict 0.71 MooseX::Getopt::Usage 0.24 MooseX::Getopt::Usage::Formatter 0.24 MooseX::Getopt::Usage::Pod::Text 0.24 MooseX::Getopt::Usage::Role::Man 0.24 MooseX::GlobRef 0.0701 MooseX::GlobRef::Object 0.0701 MooseX::GlobRef::Role::Meta::Instance 0.0701 MooseX::GlobRef::Role::Object 0.0701 MooseX::InsideOut 0.106 MooseX::InsideOut::Role::Meta::Instance 0.106 MooseX::Iterator 0.11 MooseX::Iterator::Array 0.11 MooseX::Iterator::Hash 0.11 MooseX::Iterator::Meta::Iterable 0.11 MooseX::Iterator::Role 0.11 MooseX::LazyLogDispatch 0.02 MooseX::LazyLogDispatch::Levels 0.02 MooseX::LazyRequire 0.11 MooseX::LazyRequire::Meta::Attribute::Trait::LazyRequire 0.11 MooseX::Log::Log4perl 0.47 MooseX::Log::Log4perl::Easy 0.47 MooseX::LogDispatch 1.2002 MooseX::LogDispatch::Levels unknown MooseX::MarkAsMethods 0.15 MooseX::Meta::TypeConstraint::ForceCoercion 0.01 MooseX::Method::Signatures 0.49 MooseX::Method::Signatures::Meta::Method 0.49 MooseX::Method::Signatures::Types 0.49 MooseX::MethodAttributes 0.31 MooseX::MethodAttributes::Inheritable 0.31 MooseX::MethodAttributes::Role 0.31 MooseX::MethodAttributes::Role::AttrContainer 0.31 MooseX::MethodAttributes::Role::AttrContainer::Inheritable 0.31 MooseX::MethodAttributes::Role::Meta::Class 0.31 MooseX::MethodAttributes::Role::Meta::Map 0.31 MooseX::MethodAttributes::Role::Meta::Method 0.31 MooseX::MethodAttributes::Role::Meta::Method::MaybeWrapped 0.31 MooseX::MethodAttributes::Role::Meta::Method::Wrapped 0.31 MooseX::MethodAttributes::Role::Meta::Role 0.31 MooseX::MethodAttributes::Role::Meta::Role::Application 0.31 MooseX::MethodAttributes::Role::Meta::Role::Application::Summation 0.31 MooseX::NonMoose 0.26 MooseX::NonMoose::InsideOut 0.26 MooseX::NonMoose::Meta::Role::Class 0.26 MooseX::NonMoose::Meta::Role::Constructor 0.26 MooseX::Object::Pluggable 0.0014 MooseX::OneArgNew 0.005 MooseX::Param 0.02 MooseX::Params::Validate 0.21 MooseX::Params::Validate::Exception::ValidationFailedForTypeConstraint 0.21 MooseX::POE 0.215 MooseX::POE::Aliased 0.215 MooseX::POE::Meta::Method::State 0.215 MooseX::POE::Meta::Role 0.215 MooseX::POE::Meta::Trait 0.215 MooseX::POE::Meta::Trait::Class 0.215 MooseX::POE::Meta::Trait::Instance 0.215 MooseX::POE::Meta::Trait::Object 0.215 MooseX::POE::Meta::Trait::SweetArgs 0.215 MooseX::POE::Role 0.215 MooseX::POE::SweetArgs 0.215 MooseX::RelatedClassRoles 0.004 MooseX::Role::Cmd 0.10 MooseX::Role::Cmd::Meta::Attribute::Trait unknown MooseX::Role::Parameterised 1.10 MooseX::Role::Parameterized 1.10 MooseX::Role::Parameterized::Meta::Role::Parameterized 1.10 MooseX::Role::Parameterized::Meta::Trait::Parameterizable 1.10 MooseX::Role::Parameterized::Meta::Trait::Parameterized 1.10 MooseX::Role::Parameterized::Parameters 1.10 MooseX::Role::TraitConstructor 0.01 MooseX::Role::WithOverloading 0.17 MooseX::Role::WithOverloading::Meta::Role 0.17 MooseX::Role::WithOverloading::Meta::Role::Application 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::Composite 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToClass 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToInstance 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToRole 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::FixOverloadedRefs 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::ToClass 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::ToInstance 0.17 MooseX::Role::WithOverloading::Meta::Role::Application::ToRole 
0.17 MooseX::Role::WithOverloading::Meta::Role::Composite 0.17 MooseX::SemiAffordanceAccessor 0.10 MooseX::SemiAffordanceAccessor::Role::Attribute 0.10 MooseX::SetOnce 0.200002 MooseX::SimpleConfig 0.11 MooseX::Singleton 0.30 MooseX::Singleton::Role::Meta::Class 0.30 MooseX::Singleton::Role::Meta::Instance 0.30 MooseX::Singleton::Role::Meta::Method::Constructor 0.30 MooseX::Singleton::Role::Object 0.30 MooseX::Storage 0.52 MooseX::Storage::Base::WithChecksum 0.52 MooseX::Storage::Basic 0.52 MooseX::Storage::Deferred 0.52 MooseX::Storage::Engine 0.52 MooseX::Storage::Engine::IO::AtomicFile 0.52 MooseX::Storage::Engine::IO::File 0.52 MooseX::Storage::Engine::Trait::DisableCycleDetection 0.52 MooseX::Storage::Engine::Trait::OnlyWhenBuilt 0.52 MooseX::Storage::Format::JSON 0.52 MooseX::Storage::Format::Storable 0.52 MooseX::Storage::Format::YAML 0.52 MooseX::Storage::IO::AtomicFile 0.52 MooseX::Storage::IO::File 0.52 MooseX::Storage::IO::StorableFile 0.52 MooseX::Storage::Meta::Attribute::DoNotSerialize 0.52 MooseX::Storage::Meta::Attribute::Trait::DoNotSerialize 0.52 MooseX::Storage::Traits::DisableCycleDetection 0.52 MooseX::Storage::Traits::OnlyWhenBuilt 0.52 MooseX::Storage::Util 0.52 MooseX::StrictConstructor 0.21 MooseX::StrictConstructor::Trait::Class 0.21 MooseX::StrictConstructor::Trait::Method::Constructor 0.21 MooseX::Traits 0.13 MooseX::Traits::Pluggable 0.12 MooseX::Traits::Util 0.13 MooseX::Types 0.50 MooseX::Types::Base 0.50 MooseX::Types::CheckedUtilExports 0.50 MooseX::Types::Combine 0.50 MooseX::Types::Common 0.001014 MooseX::Types::Common::Numeric 0.001014 MooseX::Types::Common::String 0.001014 MooseX::Types::DateTime 0.13 MooseX::Types::LoadableClass 0.015 MooseX::Types::Moose 0.50 MooseX::Types::Path::Class 0.09 MooseX::Types::Path::Tiny 0.012 MooseX::Types::Perl 0.101343 MooseX::Types::Set::Object 0.05 MooseX::Types::Stringlike 0.003 MooseX::Types::Structured 0.36 MooseX::Types::TypeDecorator 0.50 MooseX::Types::UndefinedType 0.50 MooseX::Types::Util 0.50 MooseX::Types::Wrapper 0.50 MooseX::Workers 0.24 MooseX::Workers::Engine 0.24 MooseX::Workers::Job 0.24 MooX::HandlesVia 0.001008 MooX::Types::MooseLike 0.29 MooX::Types::MooseLike::Base 0.29 MooX::Types::MooseLike::Numeric 1.03 Mozilla::CA 20180117 MRO::Compat 0.13 multidimensional 0.014 MyApp::Schema 0.001 MyApplication::Form::User unknown MyPersonHandler 0.14 namespace::autoclean 0.28 namespace::clean 0.27 Net::Domain::TLD 1.75 Net::EmptyPort unknown Net::HTTP 6.18 Net::HTTP::Methods 6.18 Net::HTTP::NB 6.18 Net::HTTPS 6.18 Net::Server 2.009 Net::Server::Daemonize 0.06 Net::Server::Fork unknown Net::Server::HTTP unknown Net::Server::INET unknown Net::Server::Log::Log::Log4perl unknown Net::Server::Log::Sys::Syslog unknown Net::Server::Multiplex unknown Net::Server::MultiType unknown Net::Server::PreFork unknown Net::Server::PreForkSimple unknown Net::Server::Proto unknown Net::Server::Proto::SSL unknown Net::Server::Proto::SSLEAY unknown Net::Server::Proto::TCP unknown Net::Server::Proto::UDP unknown Net::Server::Proto::UNIX unknown Net::Server::Proto::UNIXDGRAM unknown Net::Server::PSGI unknown Net::Server::SIG 0.03 Net::Server::Single unknown Net::SSLeay 1.85 Net::SSLeay::Handle 0.61 NetAddr::IP 4.079 NetAddr::IP::InetBase 0.08 NetAddr::IP::Lite 1.57 NetAddr::IP::Util 1.53 NetAddr::IP::Util_IS 1 NetAddr::IP::UtilPP 1.09 ntheory 0.70 Number::Compare 0.03 Number::Format 1.75 Number::Misc 1.2 Obj 1.39 Object::InsideOut 4.04 Object::InsideOut unknown
Object::InsideOut::Exception 4.04 Object::InsideOut::Metadata 4.04 Object::InsideOut::Secure 4.04 Object::InsideOut::Util 4.04 Object::Signature 1.07 Object::Signature::File 1.07 ojo unknown ok 1.302138 OLE::Storage_Lite::PPS 0.19 oo unknown oose 2.2011 Package::DeprecationManager 0.17 Package::Stash 0.37 Package::Stash::PP 0.37 Package::Stash::XS 0.28 Package::Variant 1.003002 PadWalker 2.3 Parallel::ForkManager 1.20 Params::Util 1.07 Params::Validate 1.29 Params::Validate::Constants 1.29 Params::Validate::PP 1.29 Params::Validate::XS 1.29 Params::ValidationCompiler 0.27 Params::ValidationCompiler::Compiler 0.27 Params::ValidationCompiler::Exceptions 0.27 Parse::Method::Signatures 1.003019 Parse::Method::Signatures::Param unknown Parse::Method::Signatures::Param::Bindable unknown Parse::Method::Signatures::Param::Named unknown Parse::Method::Signatures::Param::Placeholder unknown Parse::Method::Signatures::Param::Positional unknown Parse::Method::Signatures::Param::Unpacked unknown Parse::Method::Signatures::Param::Unpacked::Array unknown Parse::Method::Signatures::Param::Unpacked::Hash unknown Parse::Method::Signatures::ParamCollection unknown Parse::Method::Signatures::Sig unknown Parse::Method::Signatures::TypeConstraint unknown Parse::Method::Signatures::Types unknown Parse::PMFile 0.41 Parse::RecDescent 1.967015 Parse::Yapp 1.21 Parse::Yapp::Driver 1.21 Parse::Yapp::Grammar unknown Parse::Yapp::Lalr unknown Parse::Yapp::Options unknown Parse::Yapp::Output unknown Parse::Yapp::Parse unknown Path::Class 0.37 Path::Class::Dir 0.37 Path::Class::Entity 0.37 Path::Class::File 0.37 Path::FindDev unknown Path::FindDev::Object unknown Path::IsDev 1.001003 Path::IsDev::Heuristic::Changelog 1.001003 Path::IsDev::Heuristic::DevDirMarker 1.001003 Path::IsDev::Heuristic::Makefile 1.001003 Path::IsDev::Heuristic::META 1.001003 Path::IsDev::Heuristic::MYMETA 1.001003 Path::IsDev::Heuristic::TestDir 1.001003 Path::IsDev::Heuristic::Tool::Dzil 1.001003 Path::IsDev::Heuristic::Tool::MakeMaker 1.001003 Path::IsDev::Heuristic::Tool::ModuleBuild 1.001003 Path::IsDev::Heuristic::VCS::Git 1.001003 Path::IsDev::HeuristicSet::Basic 1.001003 Path::IsDev::NegativeHeuristic::HomeDir 1.001003 Path::IsDev::NegativeHeuristic::IsDev::IgnoreFile 1.001003 Path::IsDev::NegativeHeuristic::PerlINC 1.001003 Path::IsDev::Object 1.001003 Path::IsDev::Result 1.001003 Path::IsDev::Role::Heuristic 1.001003 Path::IsDev::Role::HeuristicSet 1.001003 Path::IsDev::Role::HeuristicSet::Simple 1.001003 Path::IsDev::Role::Matcher::Child::BaseName::MatchRegexp 1.001003 Path::IsDev::Role::Matcher::Child::BaseName::MatchRegexp::File 1.001003 Path::IsDev::Role::Matcher::Child::Exists::Any 1.001003 Path::IsDev::Role::Matcher::Child::Exists::Any::Dir 1.001003 Path::IsDev::Role::Matcher::Child::Exists::Any::File 1.001003 Path::IsDev::Role::Matcher::FullPath::Is::Any 1.001003 Path::IsDev::Role::NegativeHeuristic 1.001003 Path::Tiny 0.106 PDL unknown PDL::Bad unknown PDL::Basic unknown PDL::CallExt unknown PDL::Char unknown PDL::Complex 2.009 PDL::Compression unknown PDL::Constants 0.02 PDL::Core 2.019 PDL::Core::Dev unknown PDL::Dbg unknown PDL::Demos::BAD2_demo unknown PDL::Demos::BAD_demo unknown PDL::Demos::Cartography_demo unknown PDL::Demos::General unknown PDL::Demos::Gnuplot_demo unknown PDL::Demos::PGPLOT_demo unknown PDL::Demos::PGPLOT_OO_demo unknown PDL::Demos::Prima unknown
PDL::Demos::Routines unknown PDL::Demos::Transform_demo unknown PDL::Demos::TriD1 unknown PDL::Demos::TriD2 unknown PDL::Demos::TriDGallery unknown PDL::Doc::Config unknown PDL::Doc::Perldl unknown PDL::FFT unknown PDL::Filter::Linear unknown PDL::Filter::LinSmooth unknown PDL::Fit::Gaussian unknown PDL::Fit::Linfit unknown PDL::Fit::LM unknown PDL::Fit::Polynomial unknown PDL::Func unknown PDL::Graphics2D unknown PDL::Graphics::IIS unknown PDL::Graphics::Limits 0.01 PDL::Graphics::LUT unknown PDL::Graphics::PGPLOT unknown PDL::Graphics::PGPLOT::Window unknown PDL::Graphics::PGPLOTOptions unknown PDL::Graphics::State unknown PDL::GSL::DIFF unknown PDL::GSL::INTEG unknown PDL::GSL::INTERP unknown PDL::GSL::MROOT unknown PDL::GSL::RNG unknown PDL::GSLSF::AIRY unknown PDL::GSLSF::BESSEL unknown PDL::GSLSF::CLAUSEN unknown PDL::GSLSF::COULOMB unknown PDL::GSLSF::COUPLING unknown PDL::GSLSF::DAWSON unknown PDL::GSLSF::DEBYE unknown PDL::GSLSF::DILOG unknown PDL::GSLSF::ELEMENTARY unknown PDL::GSLSF::ELLINT unknown PDL::GSLSF::ELLJAC unknown PDL::GSLSF::ERF unknown PDL::GSLSF::EXP unknown PDL::GSLSF::EXPINT unknown PDL::GSLSF::FERMI_DIRAC unknown PDL::GSLSF::GAMMA unknown PDL::GSLSF::GEGENBAUER unknown PDL::GSLSF::HYPERG unknown PDL::GSLSF::LAGUERRE unknown PDL::GSLSF::LEGENDRE unknown PDL::GSLSF::LOG unknown PDL::GSLSF::POLY unknown PDL::GSLSF::POW_INT unknown PDL::GSLSF::PSI unknown PDL::GSLSF::SYNCHROTRON unknown PDL::GSLSF::TRANSPORT unknown PDL::GSLSF::TRIG unknown PDL::GSLSF::ZETA unknown PDL::Image2D unknown PDL::ImageND unknown PDL::ImageRGB unknown PDL::Install::Files 2.009 PDL::IO::Dicom unknown PDL::IO::Dumper 1.3.2 PDL::IO::FastRaw unknown PDL::IO::FITS 0.92 PDL::IO::FlexRaw unknown PDL::IO::GD unknown PDL::IO::Misc unknown PDL::IO::Pic unknown PDL::IO::Pnm unknown PDL::IO::Storable unknown PDL::Lite unknown PDL::LiteF unknown PDL::Lvalue unknown PDL::Math unknown PDL::Matrix 0.5 PDL::MatrixOps unknown PDL::MyMod unknown PDL::NiceSlice 1.001 PDL::Ops unknown PDL::Opt::Simplex unknown PDL::Options 0.92 PDL::Perldl2::Plugin::CleanErrors unknown PDL::Perldl2::Plugin::NiceSlice unknown PDL::Perldl2::Plugin::PDLCommands unknown PDL::Perldl2::Plugin::PrintControl unknown PDL::Perldl2::Profile::Perldl2 0.008 PDL::Perldl2::Script unknown PDL::PodParser unknown PDL::PP::Code unknown PDL::PP::Dump unknown PDL::PP::PdlDimsObj unknown PDL::PP::PdlParObj unknown PDL::PP::Rule 2.3 PDL::PP::Signature unknown PDL::Primitive unknown PDL::Reduce unknown PDL::Slices unknown PDL::Transform unknown PDL::Transform::Cartography 0.6 PDL::Types unknown PDL::Ufunc unknown PDL::Version 2.019 Pegex 0.64 Pegex::Base unknown Pegex::Bootstrap unknown Pegex::Compiler unknown Pegex::Grammar unknown Pegex::Grammar::Atoms unknown Pegex::Input unknown Pegex::Module unknown Pegex::Optimizer unknown Pegex::Parser unknown Pegex::Parser::Indent unknown Pegex::Pegex::AST unknown Pegex::Pegex::Grammar unknown Pegex::Receiver unknown Pegex::Regex unknown Pegex::Tree unknown Pegex::Tree::Wrap unknown Perl6::Export 0.07 Perl6::Form 0.06 Perl::Critic 1.132 Perl::Critic::Annotation 1.132 Perl::Critic::Command 1.132 Perl::Critic::Config 1.132 Perl::Critic::Document 1.132 Perl::Critic::Exception 1.132 Perl::Critic::Exception::AggregateConfiguration 1.132 Perl::Critic::Exception::Configuration 1.132 Perl::Critic::Exception::Configuration::Generic 1.132 Perl::Critic::Exception::Configuration::NonExistentPolicy 1.132 Perl::Critic::Exception::Configuration::Option 1.132 Perl::Critic::Exception::Configuration::Option::Global 1.132 
Perl::Critic::Exception::Configuration::Option::Global::ExtraParameter 1.132 Perl::Critic::Exception::Configuration::Option::Global::ParameterValue 1.132 Perl::Critic::Exception::Configuration::Option::Policy 1.132 Perl::Critic::Exception::Configuration::Option::Policy::ExtraParameter 1.132 Perl::Critic::Exception::Configuration::Option::Policy::ParameterValue 1.132 Perl::Critic::Exception::Fatal 1.132 Perl::Critic::Exception::Fatal::Generic 1.132 Perl::Critic::Exception::Fatal::Internal 1.132 Perl::Critic::Exception::Fatal::PolicyDefinition 1.132 Perl::Critic::Exception::IO 1.132 Perl::Critic::Exception::Parse 1.132 Perl::Critic::OptionsProcessor 1.132 Perl::Critic::Policy 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitBooleanGrep 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitComplexMappings 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitLvalueSubstr 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitReverseSortBlock 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitSleepViaSelect 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitStringyEval 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitStringySplit 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalCan 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalIsa 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitUselessTopic 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidGrep 1.132 Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidMap 1.132 Perl::Critic::Policy::BuiltinFunctions::RequireBlockGrep 1.132 Perl::Critic::Policy::BuiltinFunctions::RequireBlockMap 1.132 Perl::Critic::Policy::BuiltinFunctions::RequireGlobFunction 1.132 Perl::Critic::Policy::BuiltinFunctions::RequireSimpleSortBlock 1.132 Perl::Critic::Policy::ClassHierarchies::ProhibitAutoloading 1.132 Perl::Critic::Policy::ClassHierarchies::ProhibitExplicitISA 1.132 Perl::Critic::Policy::ClassHierarchies::ProhibitOneArgBless 1.132 Perl::Critic::Policy::CodeLayout::ProhibitHardTabs 1.132 Perl::Critic::Policy::CodeLayout::ProhibitParensWithBuiltins 1.132 Perl::Critic::Policy::CodeLayout::ProhibitQuotedWordLists 1.132 Perl::Critic::Policy::CodeLayout::ProhibitTrailingWhitespace 1.132 Perl::Critic::Policy::CodeLayout::RequireConsistentNewlines 1.132 Perl::Critic::Policy::CodeLayout::RequireTidyCode 1.132 Perl::Critic::Policy::CodeLayout::RequireTrailingCommas 1.132 Perl::Critic::Policy::ControlStructures::ProhibitCascadingIfElse 1.132 Perl::Critic::Policy::ControlStructures::ProhibitCStyleForLoops 1.132 Perl::Critic::Policy::ControlStructures::ProhibitDeepNests 1.132 Perl::Critic::Policy::ControlStructures::ProhibitLabelsWithSpecialBlockNames 1.132 Perl::Critic::Policy::ControlStructures::ProhibitMutatingListFunctions 1.132 Perl::Critic::Policy::ControlStructures::ProhibitNegativeExpressionsInUnlessAndUntilConditions 1.132 Perl::Critic::Policy::ControlStructures::ProhibitPostfixControls 1.132 Perl::Critic::Policy::ControlStructures::ProhibitUnlessBlocks 1.132 Perl::Critic::Policy::ControlStructures::ProhibitUnreachableCode 1.132 Perl::Critic::Policy::ControlStructures::ProhibitUntilBlocks 1.132 Perl::Critic::Policy::ControlStructures::ProhibitYadaOperator 1.132 Perl::Critic::Policy::Documentation::PodSpelling 1.132 Perl::Critic::Policy::Documentation::RequirePackageMatchesPodName 1.132 Perl::Critic::Policy::Documentation::RequirePodAtEnd 1.132 Perl::Critic::Policy::Documentation::RequirePodLinksIncludeText 1.132 Perl::Critic::Policy::Documentation::RequirePodSections 1.132 
Perl::Critic::Policy::ErrorHandling::RequireCarping 1.132 Perl::Critic::Policy::ErrorHandling::RequireCheckingReturnValueOfEval 1.132 Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators 1.132 Perl::Critic::Policy::InputOutput::ProhibitBarewordFileHandles 1.132 Perl::Critic::Policy::InputOutput::ProhibitExplicitStdin 1.132 Perl::Critic::Policy::InputOutput::ProhibitInteractiveTest 1.132 Perl::Critic::Policy::InputOutput::ProhibitJoinedReadline 1.132 Perl::Critic::Policy::InputOutput::ProhibitOneArgSelect 1.132 Perl::Critic::Policy::InputOutput::ProhibitReadlineInForLoop 1.132 Perl::Critic::Policy::InputOutput::ProhibitTwoArgOpen 1.132 Perl::Critic::Policy::InputOutput::RequireBracedFileHandleWithPrint 1.132 Perl::Critic::Policy::InputOutput::RequireBriefOpen 1.132 Perl::Critic::Policy::InputOutput::RequireCheckedClose 1.132 Perl::Critic::Policy::InputOutput::RequireCheckedOpen 1.132 Perl::Critic::Policy::InputOutput::RequireCheckedSyscalls 1.132 Perl::Critic::Policy::InputOutput::RequireEncodingWithUTF8Layer 1.132 Perl::Critic::Policy::Miscellanea::ProhibitFormats 1.132 Perl::Critic::Policy::Miscellanea::ProhibitTies 1.132 Perl::Critic::Policy::Miscellanea::ProhibitUnrestrictedNoCritic 1.132 Perl::Critic::Policy::Miscellanea::ProhibitUselessNoCritic 1.132 Perl::Critic::Policy::Modules::ProhibitAutomaticExportation 1.132 Perl::Critic::Policy::Modules::ProhibitConditionalUseStatements 1.132 Perl::Critic::Policy::Modules::ProhibitEvilModules 1.132 Perl::Critic::Policy::Modules::ProhibitExcessMainComplexity 1.132 Perl::Critic::Policy::Modules::ProhibitMultiplePackages 1.132 Perl::Critic::Policy::Modules::RequireBarewordIncludes 1.132 Perl::Critic::Policy::Modules::RequireEndWithOne 1.132 Perl::Critic::Policy::Modules::RequireExplicitPackage 1.132 Perl::Critic::Policy::Modules::RequireFilenameMatchesPackage 1.132 Perl::Critic::Policy::Modules::RequireNoMatchVarsWithUseEnglish 1.132 Perl::Critic::Policy::Modules::RequireVersionVar 1.132 Perl::Critic::Policy::NamingConventions::Capitalization 1.132 Perl::Critic::Policy::NamingConventions::ProhibitAmbiguousNames 1.132 Perl::Critic::Policy::Objects::ProhibitIndirectSyntax 1.132 Perl::Critic::Policy::References::ProhibitDoubleSigils 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitCaptureWithoutTest 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitComplexRegexes 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitEnumeratedClasses 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitEscapedMetacharacters 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitFixedStringMatches 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitSingleCharAlternation 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitUnusedCapture 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitUnusualDelimiters 1.132 Perl::Critic::Policy::RegularExpressions::ProhibitUselessTopic 1.132 Perl::Critic::Policy::RegularExpressions::RequireBracesForMultiline 1.132 Perl::Critic::Policy::RegularExpressions::RequireDotMatchAnything 1.132 Perl::Critic::Policy::RegularExpressions::RequireExtendedFormatting 1.132 Perl::Critic::Policy::RegularExpressions::RequireLineBoundaryMatching 1.132 Perl::Critic::Policy::Subroutines::ProhibitAmpersandSigils 1.132 Perl::Critic::Policy::Subroutines::ProhibitBuiltinHomonyms 1.132 Perl::Critic::Policy::Subroutines::ProhibitExcessComplexity 1.132 Perl::Critic::Policy::Subroutines::ProhibitExplicitReturnUndef 1.132 Perl::Critic::Policy::Subroutines::ProhibitManyArgs 1.132 
Perl::Critic::Policy::Subroutines::ProhibitNestedSubs 1.132 Perl::Critic::Policy::Subroutines::ProhibitReturnSort 1.132 Perl::Critic::Policy::Subroutines::ProhibitSubroutinePrototypes 1.132 Perl::Critic::Policy::Subroutines::ProhibitUnusedPrivateSubroutines 1.132 Perl::Critic::Policy::Subroutines::ProtectPrivateSubs 1.132 Perl::Critic::Policy::Subroutines::RequireArgUnpacking 1.132 Perl::Critic::Policy::Subroutines::RequireFinalReturn 1.132 Perl::Critic::Policy::TestingAndDebugging::ProhibitNoStrict 1.132 Perl::Critic::Policy::TestingAndDebugging::ProhibitNoWarnings 1.132 Perl::Critic::Policy::TestingAndDebugging::ProhibitProlongedStrictureOverride 1.132 Perl::Critic::Policy::TestingAndDebugging::RequireTestLabels 1.132 Perl::Critic::Policy::TestingAndDebugging::RequireUseStrict 1.132 Perl::Critic::Policy::TestingAndDebugging::RequireUseWarnings 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitCommaSeparatedStatements 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitComplexVersion 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitConstantPragma 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitEmptyQuotes 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitEscapedCharacters 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitImplicitNewlines 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitInterpolationOfLiterals 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitLeadingZeros 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitLongChainsOfMethodCalls 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitMagicNumbers 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitMismatchedOperators 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitMixedBooleanOperators 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitNoisyQuotes 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitQuotesAsQuotelikeOperatorDelimiters 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitSpecialLiteralHeredocTerminator 1.132 Perl::Critic::Policy::ValuesAndExpressions::ProhibitVersionStrings 1.132 Perl::Critic::Policy::ValuesAndExpressions::RequireConstantVersion 1.132 Perl::Critic::Policy::ValuesAndExpressions::RequireInterpolationOfMetachars 1.132 Perl::Critic::Policy::ValuesAndExpressions::RequireNumberSeparators 1.132 Perl::Critic::Policy::ValuesAndExpressions::RequireQuotedHeredocTerminator 1.132 Perl::Critic::Policy::ValuesAndExpressions::RequireUpperCaseHeredocTerminator 1.132 Perl::Critic::Policy::Variables::ProhibitAugmentedAssignmentInDeclaration 1.132 Perl::Critic::Policy::Variables::ProhibitConditionalDeclarations 1.132 Perl::Critic::Policy::Variables::ProhibitEvilVariables 1.132 Perl::Critic::Policy::Variables::ProhibitLocalVars 1.132 Perl::Critic::Policy::Variables::ProhibitMatchVars 1.132 Perl::Critic::Policy::Variables::ProhibitPackageVars 1.132 Perl::Critic::Policy::Variables::ProhibitPerl4PackageNames 1.132 Perl::Critic::Policy::Variables::ProhibitPunctuationVars 1.132 Perl::Critic::Policy::Variables::ProhibitReusedNames 1.132 Perl::Critic::Policy::Variables::ProhibitUnusedVariables 1.132 Perl::Critic::Policy::Variables::ProtectPrivateVars 1.132 Perl::Critic::Policy::Variables::RequireInitializationForLocalVars 1.132 Perl::Critic::Policy::Variables::RequireLexicalLoopIterators 1.132 Perl::Critic::Policy::Variables::RequireLocalizedPunctuationVars 1.132 Perl::Critic::Policy::Variables::RequireNegativeIndices 1.132 Perl::Critic::PolicyConfig 1.132 Perl::Critic::PolicyFactory 1.132 
Perl::Critic::PolicyListing 1.132 Perl::Critic::PolicyParameter 1.132 Perl::Critic::PolicyParameter::Behavior 1.132 Perl::Critic::PolicyParameter::Behavior::Boolean 1.132 Perl::Critic::PolicyParameter::Behavior::Enumeration 1.132 Perl::Critic::PolicyParameter::Behavior::Integer 1.132 Perl::Critic::PolicyParameter::Behavior::String 1.132 Perl::Critic::PolicyParameter::Behavior::StringList 1.132 Perl::Critic::ProfilePrototype 1.132 Perl::Critic::Statistics 1.132 Perl::Critic::TestUtils 1.132 Perl::Critic::Theme 1.132 Perl::Critic::ThemeListing 1.132 Perl::Critic::UserProfile 1.132 Perl::Critic::Utils 1.132 Perl::Critic::Utils::Constants 1.132 Perl::Critic::Utils::DataConversion 1.132 Perl::Critic::Utils::McCabe 1.132 Perl::Critic::Utils::Perl 1.132 Perl::Critic::Utils::POD 1.132 Perl::Critic::Utils::POD::ParseInteriorSequence 1.132 Perl::Critic::Utils::PPI 1.132 Perl::Critic::Violation 1.132 Perl::PrereqScanner 1.023 Perl::PrereqScanner::Scanner 1.023 Perl::PrereqScanner::Scanner::Aliased 1.023 Perl::PrereqScanner::Scanner::Moose 1.023 Perl::PrereqScanner::Scanner::Perl5 1.023 Perl::PrereqScanner::Scanner::POE 1.023 Perl::PrereqScanner::Scanner::Superclass 1.023 Perl::PrereqScanner::Scanner::TestMore 1.023 Perl::Tidy unknown Perl::Unsafe::Signals 0.03 Perl::Version 1.013 PerlIO::gzip 0.20 PerlIO::Layers 0.011 PerlIO::utf8_strict 0.007 PerlIO::via::Timeout 0.32 Pinto 0.14 Pinto::Action 0.14 Pinto::Action::Add 0.14 Pinto::Action::Clean 0.14 Pinto::Action::Copy 0.14 Pinto::Action::Default 0.14 Pinto::Action::Delete 0.14 Pinto::Action::Diff 0.14 Pinto::Action::Install 0.14 Pinto::Action::Kill 0.14 Pinto::Action::List 0.14 Pinto::Action::Lock 0.14 Pinto::Action::Log 0.14 Pinto::Action::Look 0.14 Pinto::Action::Merge 0.14 Pinto::Action::New 0.14 Pinto::Action::Nop 0.14 Pinto::Action::Pin 0.14 Pinto::Action::Props 0.14 Pinto::Action::Pull 0.14 Pinto::Action::Register 0.14 Pinto::Action::Rename 0.14 Pinto::Action::Reset 0.14 Pinto::Action::Revert 0.14 Pinto::Action::Roots 0.14 Pinto::Action::Stacks 0.14 Pinto::Action::Statistics 0.14 Pinto::Action::Unlock 0.14 Pinto::Action::Unpin 0.14 Pinto::Action::Unregister 0.14 Pinto::Action::Update 0.14 Pinto::Action::Verify 0.14 Pinto::ArchiveUnpacker 0.14 Pinto::Chrome 0.14 Pinto::Chrome::Net 0.14 Pinto::Chrome::Term 0.14 Pinto::Config 0.14 Pinto::Constants 0.14 Pinto::Database 0.14 Pinto::Difference 0.14 Pinto::DifferenceEntry 0.14 Pinto::Editor 0.14 Pinto::Editor::Clip 0.14 Pinto::Editor::Edit 0.14 Pinto::Exception 0.14 Pinto::Globals 0.14 Pinto::IndexReader 0.14 Pinto::IndexWriter 0.14 Pinto::Initializer 0.14 Pinto::Locator 0.14 Pinto::Locator::Mirror 0.14 Pinto::Locator::Multiplex 0.14 Pinto::Locator::Stratopan 0.14 Pinto::Locker 0.14 Pinto::Migrator 0.14 Pinto::ModlistWriter 0.14 Pinto::PackageExtractor 0.14 Pinto::PrerequisiteWalker 0.14 Pinto::Remote 0.14 Pinto::Remote::Action 0.14 Pinto::Remote::Action::Add 0.14 Pinto::Remote::Action::Install 0.14 Pinto::Remote::Result 0.14 Pinto::Repository 0.14 Pinto::Result 0.14 Pinto::RevisionWalker 0.14 Pinto::Role::Committable 0.14 Pinto::Role::Installer 0.14 Pinto::Role::PauseConfig 0.14 Pinto::Role::Plated 0.14 Pinto::Role::Puller 0.14 Pinto::Role::Schema::Result 0.14 Pinto::Role::Transactional 0.14 Pinto::Role::UserAgent 0.14 Pinto::Schema 0.14 Pinto::Schema::Result::Ancestry 0.14 Pinto::Schema::Result::Distribution 0.14 Pinto::Schema::Result::Package 0.14 Pinto::Schema::Result::Prerequisite 0.14 Pinto::Schema::Result::Registration 0.14 Pinto::Schema::Result::Revision 0.14 Pinto::Schema::Result::Stack 0.14 
Pinto::Schema::ResultSet::Distribution 0.14 Pinto::Schema::ResultSet::Package 0.14 Pinto::Schema::ResultSet::Registration 0.14 Pinto::Server 0.14 Pinto::Server::Responder 0.14 Pinto::Server::Responder::Action 0.14 Pinto::Server::Responder::File 0.14 Pinto::Server::Router 0.14 Pinto::Shell 0.14 Pinto::Statistics 0.14 Pinto::Store 0.14 Pinto::Target 0.14 Pinto::Target::Distribution 0.14 Pinto::Target::Package 0.14 Pinto::Types 0.14 Pinto::Util 0.14 Plack 1.0047 Plack::App::Cascade unknown Plack::App::CGIBin unknown Plack::App::Directory unknown Plack::App::File unknown Plack::App::PSGIBin unknown Plack::App::URLMap unknown Plack::App::WrapCGI unknown Plack::Builder unknown Plack::Component unknown Plack::Handler unknown Plack::Handler::Apache1 unknown Plack::Handler::Apache2 unknown Plack::Handler::Apache2::Registry unknown Plack::Handler::CGI unknown Plack::Handler::FCGI unknown Plack::Handler::HTTP::Server::PSGI unknown Plack::Handler::HTTP::Server::Simple 0.16 Plack::Handler::Standalone unknown Plack::Handler::Starman unknown Plack::HTTPParser unknown Plack::HTTPParser::PP unknown Plack::Loader unknown Plack::Loader::Delayed unknown Plack::Loader::Restarter unknown Plack::Loader::Shotgun unknown Plack::LWPish unknown Plack::Middleware unknown Plack::Middleware::AccessLog unknown Plack::Middleware::AccessLog::Timed unknown Plack::Middleware::Auth::Basic unknown Plack::Middleware::BufferedStreaming unknown Plack::Middleware::Chunked unknown Plack::Middleware::Conditional unknown Plack::Middleware::ConditionalGET unknown Plack::Middleware::ContentLength unknown Plack::Middleware::ContentMD5 unknown Plack::Middleware::ErrorDocument unknown Plack::Middleware::FixMissingBodyInRedirect 0.12 Plack::Middleware::Head unknown Plack::Middleware::HTTPExceptions unknown Plack::Middleware::IIS6ScriptNameFix unknown Plack::Middleware::IIS7KeepAliveFix unknown Plack::Middleware::JSONP unknown Plack::Middleware::LighttpdScriptNameFix unknown Plack::Middleware::Lint unknown Plack::Middleware::Log4perl unknown Plack::Middleware::LogDispatch unknown Plack::Middleware::MethodOverride 0.20 Plack::Middleware::NullLogger unknown Plack::Middleware::RearrangeHeaders unknown Plack::Middleware::Recursive unknown Plack::Middleware::Refresh unknown Plack::Middleware::RemoveRedundantBody 0.06 Plack::Middleware::ReverseProxy 0.15 Plack::Middleware::Runtime unknown Plack::Middleware::SimpleContentFilter unknown Plack::Middleware::SimpleLogger unknown Plack::Middleware::StackTrace unknown Plack::Middleware::Static unknown Plack::Middleware::XFramework unknown Plack::Middleware::XSendfile unknown Plack::MIME unknown Plack::Request 1.0047 Plack::Request::Upload unknown Plack::Response 1.0047 Plack::Runner unknown Plack::TempBuffer unknown Plack::Test unknown Plack::Test::ExternalServer 0.02 Plack::Test::MockHTTP unknown Plack::Test::Server unknown Plack::Test::Suite unknown Plack::Util unknown Plack::Util::Accessor unknown Pod::Coverage 0.23 Pod::Coverage::CountParents unknown Pod::Coverage::ExportOnly unknown Pod::Coverage::Moose 0.07 Pod::Coverage::Overloader unknown Pod::Coverage::TrustPod 0.100005 Pod::Elemental 0.103004 Pod::Elemental::Autoblank 0.103004 Pod::Elemental::Autochomp 0.103004 Pod::Elemental::Command 0.103004 Pod::Elemental::Document 0.103004 Pod::Elemental::Element::Generic::Blank 0.103004 Pod::Elemental::Element::Generic::Command 0.103004 Pod::Elemental::Element::Generic::Nonpod 0.103004 Pod::Elemental::Element::Generic::Text 0.103004 Pod::Elemental::Element::Nested 0.103004 
Pod::Elemental::Element::Pod5::Command 0.103004 Pod::Elemental::Element::Pod5::Data 0.103004 Pod::Elemental::Element::Pod5::Nonpod 0.103004 Pod::Elemental::Element::Pod5::Ordinary 0.103004 Pod::Elemental::Element::Pod5::Region 0.103004 Pod::Elemental::Element::Pod5::Verbatim 0.103004 Pod::Elemental::Flat 0.103004 Pod::Elemental::Node 0.103004 Pod::Elemental::Objectifier 0.103004 Pod::Elemental::Paragraph 0.103004 Pod::Elemental::Selectors 0.103004 Pod::Elemental::Transformer 0.103004 Pod::Elemental::Transformer::Gatherer 0.103004 Pod::Elemental::Transformer::Nester 0.103004 Pod::Elemental::Transformer::Pod5 0.103004 Pod::Elemental::Types 0.103004 Pod::Eventual 0.094001 Pod::Eventual::Simple 0.094001 Pod::Markdown 3.005 Pod::Perldoc::ToMarkdown 3.005 Pod::Readme unknown Pod::Readme::Filter unknown Pod::Readme::Plugin unknown Pod::Readme::Plugin::changes unknown Pod::Readme::Plugin::requires unknown Pod::Readme::Plugin::version unknown Pod::Readme::Types unknown Pod::Spell 1.20 Pod::Wordlist 1.20 POE 1.367 POE::Component 1.367 POE::Component::Client::TCP 1.367 POE::Component::Server::TCP 1.367 POE::Driver 1.367 POE::Driver::SysRW 1.367 POE::Filter 1.367 POE::Filter::Block 1.367 POE::Filter::Grep 1.367 POE::Filter::HTTPD 1.367 POE::Filter::Line 1.367 POE::Filter::Map 1.367 POE::Filter::RecordBlock 1.367 POE::Filter::Reference 1.367 POE::Filter::Stackable 1.367 POE::Filter::Stream 1.367 POE::Kernel 1.367 POE::Loop 1.367 POE::Loop::IO_Poll 1.367 POE::Loop::PerlSignals 1.367 POE::Loop::Select 1.367 POE::NFA 1.367 POE::Pipe 1.367 POE::Pipe::OneWay 1.367 POE::Pipe::TwoWay 1.367 POE::Queue 1.367 POE::Queue::Array 1.367 POE::Resource 1.367 POE::Resource::Aliases 1.367 POE::Resource::Clock 1.367 POE::Resource::Events 1.367 POE::Resource::Extrefs 1.367 POE::Resource::FileHandles 1.367 POE::Resource::Sessions 1.367 POE::Resource::SIDs 1.367 POE::Resource::Signals 1.367 POE::Resources 1.367 POE::Session 1.367 POE::Test::DondeEstan 1.360 POE::Test::Loops 1.360 POE::Test::Sequence unknown POE::Wheel 1.367 POE::Wheel::Curses 1.367 POE::Wheel::FollowTail 1.367 POE::Wheel::ListenAccept 1.367 POE::Wheel::ReadLine 1.367 POE::Wheel::ReadWrite 1.367 POE::Wheel::Run 1.367 POE::Wheel::SocketFactory 1.367 POSIX::strftime::Compiler 0.42 PostScript::Document 0.06 PostScript::Elements unknown PostScript::Metrics 0.06 PostScript::TextBlock 0.06 PPI 1.236 PPI::Cache 1.236 PPI::Document 1.236 PPI::Document::File 1.236 PPI::Document::Fragment 1.236 PPI::Document::Normalized 1.236 PPI::Dumper 1.236 PPI::Element 1.236 PPI::Exception 1.236 PPI::Exception::ParserRejection 1.236 PPI::Find 1.236 PPI::HTML 1.08 PPI::HTML::Fragment 1.08 PPI::Lexer 1.236 PPI::Node 1.236 PPI::Normal 1.236 PPI::Normal::Standard 1.236 PPI::Statement 1.236 PPI::Statement::Break 1.236 PPI::Statement::Compound 1.236 PPI::Statement::Data 1.236 PPI::Statement::End 1.236 PPI::Statement::Expression 1.236 PPI::Statement::Given 1.236 PPI::Statement::Include 1.236 PPI::Statement::Include::Perl6 1.236 PPI::Statement::Null 1.236 PPI::Statement::Package 1.236 PPI::Statement::Scheduled 1.236 PPI::Statement::Sub 1.236 PPI::Statement::Unknown 1.236 PPI::Statement::UnmatchedBrace 1.236 PPI::Statement::Variable 1.236 PPI::Statement::When 1.236 PPI::Structure 1.236 PPI::Structure::Block 1.236 PPI::Structure::Condition 1.236 PPI::Structure::Constructor 1.236 PPI::Structure::For 1.236 PPI::Structure::Given 1.236 PPI::Structure::List 1.236 PPI::Structure::Subscript 1.236 PPI::Structure::Unknown 1.236 PPI::Structure::When 1.236 PPI::Token 1.236 PPI::Token::_QuoteEngine 
1.236 PPI::Token::_QuoteEngine::Full 1.236 PPI::Token::_QuoteEngine::Simple 1.236 PPI::Token::ArrayIndex 1.236 PPI::Token::Attribute 1.236 PPI::Token::BOM 1.236 PPI::Token::Cast 1.236 PPI::Token::Comment 1.236 PPI::Token::DashedWord 1.236 PPI::Token::Data 1.236 PPI::Token::End 1.236 PPI::Token::HereDoc 1.236 PPI::Token::Label 1.236 PPI::Token::Magic 1.236 PPI::Token::Number 1.236 PPI::Token::Number::Binary 1.236 PPI::Token::Number::Exp 1.236 PPI::Token::Number::Float 1.236 PPI::Token::Number::Hex 1.236 PPI::Token::Number::Octal 1.236 PPI::Token::Number::Version 1.236 PPI::Token::Operator 1.236 PPI::Token::Pod 1.236 PPI::Token::Prototype 1.236 PPI::Token::Quote 1.236 PPI::Token::Quote::Double 1.236 PPI::Token::Quote::Interpolate 1.236 PPI::Token::Quote::Literal 1.236 PPI::Token::Quote::Single 1.236 PPI::Token::QuoteLike 1.236 PPI::Token::QuoteLike::Backtick 1.236 PPI::Token::QuoteLike::Command 1.236 PPI::Token::QuoteLike::Readline 1.236 PPI::Token::QuoteLike::Regexp 1.236 PPI::Token::QuoteLike::Words 1.236 PPI::Token::Regexp 1.236 PPI::Token::Regexp::Match 1.236 PPI::Token::Regexp::Substitute 1.236 PPI::Token::Regexp::Transliterate 1.236 PPI::Token::Separator 1.236 PPI::Token::Structure 1.236 PPI::Token::Symbol 1.236 PPI::Token::Unknown 1.236 PPI::Token::Whitespace 1.236 PPI::Token::Word 1.236 PPI::Tokenizer 1.236 PPI::Transform 1.236 PPI::Transform::UpdateCopyright 1.236 PPI::Util 1.236 PPI::XSAccessor 1.236 PPIx::QuoteLike 0.006 PPIx::QuoteLike::Constant 0.006 PPIx::QuoteLike::Dumper 0.006 PPIx::QuoteLike::Token 0.006 PPIx::QuoteLike::Token::Control 0.006 PPIx::QuoteLike::Token::Delimiter 0.006 PPIx::QuoteLike::Token::Interpolation 0.006 PPIx::QuoteLike::Token::String 0.006 PPIx::QuoteLike::Token::Structure 0.006 PPIx::QuoteLike::Token::Unknown 0.006 PPIx::QuoteLike::Token::Whitespace 0.006 PPIx::QuoteLike::Utils 0.006 PPIx::Regexp 0.061 PPIx::Regexp::Constant 0.061 PPIx::Regexp::Dumper 0.061 PPIx::Regexp::Element 0.061 PPIx::Regexp::Lexer 0.061 PPIx::Regexp::Node 0.061 PPIx::Regexp::Node::Range 0.061 PPIx::Regexp::Node::Unknown 0.061 PPIx::Regexp::StringTokenizer 0.061 PPIx::Regexp::Structure 0.061 PPIx::Regexp::Structure::Assertion 0.061 PPIx::Regexp::Structure::BranchReset 0.061 PPIx::Regexp::Structure::Capture 0.061 PPIx::Regexp::Structure::CharClass 0.061 PPIx::Regexp::Structure::Code 0.061 PPIx::Regexp::Structure::Main 0.061 PPIx::Regexp::Structure::Modifier 0.061 PPIx::Regexp::Structure::NamedCapture 0.061 PPIx::Regexp::Structure::Quantifier 0.061 PPIx::Regexp::Structure::Regexp 0.061 PPIx::Regexp::Structure::RegexSet 0.061 PPIx::Regexp::Structure::Replacement 0.061 PPIx::Regexp::Structure::Subexpression 0.061 PPIx::Regexp::Structure::Switch 0.061 PPIx::Regexp::Structure::Unknown 0.061 PPIx::Regexp::Support 0.061 PPIx::Regexp::Token 0.061 PPIx::Regexp::Token::Assertion 0.061 PPIx::Regexp::Token::Backreference 0.061 PPIx::Regexp::Token::Backtrack 0.061 PPIx::Regexp::Token::CharClass 0.061 PPIx::Regexp::Token::CharClass::POSIX 0.061 PPIx::Regexp::Token::CharClass::POSIX::Unknown 0.061 PPIx::Regexp::Token::CharClass::Simple 0.061 PPIx::Regexp::Token::Code 0.061 PPIx::Regexp::Token::Comment 0.061 PPIx::Regexp::Token::Condition 0.061 PPIx::Regexp::Token::Control 0.061 PPIx::Regexp::Token::Delimiter 0.061 PPIx::Regexp::Token::Greediness 0.061 PPIx::Regexp::Token::GroupType 0.061 PPIx::Regexp::Token::GroupType::Assertion 0.061 PPIx::Regexp::Token::GroupType::BranchReset 0.061 PPIx::Regexp::Token::GroupType::Code 0.061 PPIx::Regexp::Token::GroupType::Modifier 0.061 
PPIx::Regexp::Token::GroupType::NamedCapture 0.061 PPIx::Regexp::Token::GroupType::Subexpression 0.061 PPIx::Regexp::Token::GroupType::Switch 0.061 PPIx::Regexp::Token::Interpolation 0.061 PPIx::Regexp::Token::Literal 0.061 PPIx::Regexp::Token::Modifier 0.061 PPIx::Regexp::Token::NoOp 0.061 PPIx::Regexp::Token::Operator 0.061 PPIx::Regexp::Token::Quantifier 0.061 PPIx::Regexp::Token::Recursion 0.061 PPIx::Regexp::Token::Reference 0.061 PPIx::Regexp::Token::Structure 0.061 PPIx::Regexp::Token::Unknown 0.061 PPIx::Regexp::Token::Unmatched 0.061 PPIx::Regexp::Token::Whitespace 0.061 PPIx::Regexp::Tokenizer 0.061 PPIx::Regexp::Util 0.061 PPIx::Utilities 1.001000 PPIx::Utilities::Exception::Bug 1.001000 PPIx::Utilities::Node 1.001000 PPIx::Utilities::Statement 1.001000 Proc::Fork 0.804 Proc::Terminator::Ctx 0.05 RDF::Trine 1.019 RDF::Trine::Error 1.019 RDF::Trine::Exporter::CSV 1.019 RDF::Trine::Exporter::RDFPatch 1.019 RDF::Trine::Graph 1.019 RDF::Trine::Iterator 1.019 RDF::Trine::Iterator::Bindings 1.019 RDF::Trine::Iterator::Bindings::Materialized 1.019 RDF::Trine::Iterator::Boolean 1.019 RDF::Trine::Iterator::Graph 1.019 RDF::Trine::Iterator::Graph::Materialized 1.019 RDF::Trine::Iterator::JSONHandler 1.019 RDF::Trine::Iterator::SAXHandler 1.019 RDF::Trine::Model 1.019 RDF::Trine::Model::Dataset 1.019 RDF::Trine::Model::StatementFilter 1.019 RDF::Trine::Model::Union 1.019 RDF::Trine::Namespace 1.019 RDF::Trine::NamespaceMap 1.019 RDF::Trine::Node 1.019 RDF::Trine::Node::Blank 1.019 RDF::Trine::Node::Literal 1.019 RDF::Trine::Node::Nil 1.019 RDF::Trine::Node::Resource 1.019 RDF::Trine::Node::Variable 1.019 RDF::Trine::Parser 1.019 RDF::Trine::Parser::LineProtocol 1.019 RDF::Trine::Parser::NQuads 1.019 RDF::Trine::Parser::NTriples 1.019 RDF::Trine::Parser::RDFa 1.019 RDF::Trine::Parser::RDFJSON 1.019 RDF::Trine::Parser::RDFPatch 1.019 RDF::Trine::Parser::RDFXML 1.019 RDF::Trine::Parser::Redland 1.019 RDF::Trine::Parser::TriG 1.019 RDF::Trine::Parser::Turtle 1.019 RDF::Trine::Parser::Turtle::Constants 1.019 RDF::Trine::Parser::Turtle::Lexer 1.019 RDF::Trine::Parser::Turtle::Token unknown RDF::Trine::Pattern 1.019 RDF::Trine::Serializer 1.019 RDF::Trine::Serializer::NQuads 1.019 RDF::Trine::Serializer::NTriples 1.019 RDF::Trine::Serializer::NTriples::Canonical 1.019 RDF::Trine::Serializer::RDFJSON 1.019 RDF::Trine::Serializer::RDFPatch 1.019 RDF::Trine::Serializer::RDFXML 1.019 RDF::Trine::Serializer::TriG 1.019 RDF::Trine::Serializer::TSV 1.019 RDF::Trine::Serializer::Turtle 1.019 RDF::Trine::Statement 1.019 RDF::Trine::Statement::Quad 1.019 RDF::Trine::Store 1.019 RDF::Trine::Store::DBI 1.019 RDF::Trine::Store::DBI::mysql 1.019 RDF::Trine::Store::DBI::Pg 1.019 RDF::Trine::Store::DBI::SQLite 1.019 RDF::Trine::Store::Dydra 1.019 RDF::Trine::Store::Hexastore 1.019 RDF::Trine::Store::LanguagePreference 1.019 RDF::Trine::Store::Memory 1.019 RDF::Trine::Store::Redis 1.019 RDF::Trine::Store::Redland 1.019 RDF::Trine::Store::SPARQL 1.019 RDF::Trine::VariableBindings 1.019 re::engine::RE2 0.13 Readonly 2.05 Redis 1.991 Redis::Hash 1.991 Redis::List 1.991 Redis::Sentinel 1.991 Ref::Util 0.204 Ref::Util::PP 0.204 Ref::Util::XS 0.117 Regexp::Common 2017060201 Regexp::Common::_support 2017060201 Regexp::Common::balanced 2017060201 Regexp::Common::CC 2017060201 Regexp::Common::comment 2017060201 Regexp::Common::delimited 2017060201 Regexp::Common::lingua 2017060201 Regexp::Common::list 2017060201 Regexp::Common::net 2017060201 Regexp::Common::number 2017060201 Regexp::Common::profanity 2017060201 
Regexp::Common::SEN 2017060201 Regexp::Common::URI 2017060201 Regexp::Common::URI::fax 2017060201 Regexp::Common::URI::file 2017060201 Regexp::Common::URI::ftp 2017060201 Regexp::Common::URI::gopher 2017060201 Regexp::Common::URI::http 2017060201 Regexp::Common::URI::news 2017060201 Regexp::Common::URI::pop 2017060201 Regexp::Common::URI::prospero 2017060201 Regexp::Common::URI::RFC1035 2017060201 Regexp::Common::URI::RFC1738 2017060201 Regexp::Common::URI::RFC1808 2017060201 Regexp::Common::URI::RFC2384 2017060201 Regexp::Common::URI::RFC2396 2017060201 Regexp::Common::URI::RFC2806 2017060201 Regexp::Common::URI::tel 2017060201 Regexp::Common::URI::telnet 2017060201 Regexp::Common::URI::tv 2017060201 Regexp::Common::URI::wais 2017060201 Regexp::Common::whitespace 2017060201 Regexp::Common::zip 2017060201 Reply::Plugin::TypeTiny 1.002002 Return::MultiLevel 0.05 rlib 0.02 Role::HasMessage 0.006 Role::HasMessage::Errf 0.006 Role::Identifiable::HasIdent 0.007 Role::Identifiable::HasTags 0.007 Role::Tiny 2.000006 Role::Tiny::With 2.000006 Router::Simple 0.17 Router::Simple::Declare unknown Router::Simple::Route unknown Router::Simple::SubMapper unknown Safe::Isa 1.000010 Scalar::Util 1.50 Scalar::Util::Numeric 0.40 Scope::Guard 0.21 Scope::Upper 0.30 Set::Infinite 0.65 Set::Infinite::_recurrence unknown Set::Infinite::Arithmetic unknown Set::Infinite::Basic unknown Set::IntervalTree 0.12 Set::IntSpan 1.19 Set::Object::Weak unknown Set::Scalar 1.29 Set::Scalar::Base 1.29 Set::Scalar::Null 1.29 Set::Scalar::Real 1.29 Set::Scalar::Universe 1.29 Set::Scalar::Valued 1.29 Set::Scalar::ValuedUniverse 1.29 Set::Scalar::Virtual 1.29 Slurp 0.4 Smart::Comments 1.06 SOAP::Constants 1.27 SOAP::Lite 1.27 SOAP::Lite::Deserializer::XMLSchema1999 1.27 SOAP::Lite::Deserializer::XMLSchema2001 1.27 SOAP::Lite::Deserializer::XMLSchemaSOAP1_1 1.27 SOAP::Lite::Deserializer::XMLSchemaSOAP1_2 1.27 SOAP::Lite::Packager 1.27 SOAP::Lite::Utils 1.27 SOAP::Packager 1.27 SOAP::Test 1.27 SOAP::Transport::HTTP 1.27 SOAP::Transport::IO 1.27 SOAP::Transport::LOCAL 1.27 SOAP::Transport::LOOPBACK 1.27 SOAP::Transport::MAILTO 1.27 SOAP::Transport::POP3 1.27 SOAP::Transport::TCP 1.27 Socket::GetAddrInfo 0.22 Socket::GetAddrInfo::Core 0.22 Socket::GetAddrInfo::Emul 0.22 Socket::GetAddrInfo::Socket6api 0.22 Socket::GetAddrInfo::Strict 0.22 Socket::GetAddrInfo::XS 0.22 Software::License 0.103013 Software::License::AGPL_3 0.103013 Software::License::Apache_1_1 0.103013 Software::License::Apache_2_0 0.103013 Software::License::Artistic_1_0 0.103013 Software::License::Artistic_2_0 0.103013 Software::License::BSD 0.103013 Software::License::CC0_1_0 0.103013 Software::License::Custom 0.103013 Software::License::EUPL_1_1 0.103013 Software::License::EUPL_1_2 0.103013 Software::License::FreeBSD 0.103013 Software::License::GFDL_1_2 0.103013 Software::License::GFDL_1_3 0.103013 Software::License::GPL_1 0.103013 Software::License::GPL_2 0.103013 Software::License::GPL_3 0.103013 Software::License::LGPL_2_1 0.103013 Software::License::LGPL_3_0 0.103013 Software::License::MIT 0.103013 Software::License::Mozilla_1_0 0.103013 Software::License::Mozilla_1_1 0.103013 Software::License::Mozilla_2_0 0.103013 Software::License::None 0.103013 Software::License::OpenSSL 0.103013 Software::License::Perl_5 0.103013 Software::License::PostgreSQL 0.103013 Software::License::QPL_1_0 0.103013 Software::License::SSLeay 0.103013 Software::License::Sun 0.103013 Software::License::Zlib 0.103013 Software::LicenseUtils 0.103013 Sort::Naturally 1.03 Specio 0.42 
Specio::Coercion 0.42 Specio::Constraint::AnyCan 0.42 Specio::Constraint::AnyDoes 0.42 Specio::Constraint::AnyIsa 0.42 Specio::Constraint::Enum 0.42 Specio::Constraint::Intersection 0.42 Specio::Constraint::ObjectCan 0.42 Specio::Constraint::ObjectDoes 0.42 Specio::Constraint::ObjectIsa 0.42 Specio::Constraint::Parameterizable 0.42 Specio::Constraint::Parameterized 0.42 Specio::Constraint::Role::CanType 0.42 Specio::Constraint::Role::DoesType 0.42 Specio::Constraint::Role::Interface 0.42 Specio::Constraint::Role::IsaType 0.42 Specio::Constraint::Simple 0.42 Specio::Constraint::Structurable 0.42 Specio::Constraint::Structured 0.42 Specio::Constraint::Union 0.42 Specio::Declare 0.42 Specio::DeclaredAt 0.42 Specio::Exception 0.42 Specio::Exporter 0.42 Specio::Helpers 0.42 Specio::Library::Builtins 0.42 Specio::Library::Numeric 0.42 Specio::Library::Perl 0.42 Specio::Library::String 0.42 Specio::Library::Structured 0.42 Specio::Library::Structured::Dict 0.42 Specio::Library::Structured::Map 0.42 Specio::Library::Structured::Tuple 0.42 Specio::OO 0.42 Specio::PartialDump 0.42 Specio::Registry 0.42 Specio::Role::Inlinable 0.42 Specio::Subs 0.42 Specio::TypeChecks 0.42 Spiffy 0.46 Spiffy::mixin unknown Spreadsheet::ParseExcel 0.65 Spreadsheet::ParseExcel::Cell 0.65 Spreadsheet::ParseExcel::Dump 0.65 Spreadsheet::ParseExcel::FmtDefault 0.65 Spreadsheet::ParseExcel::FmtJapan 0.65 Spreadsheet::ParseExcel::FmtJapan2 0.65 Spreadsheet::ParseExcel::FmtUnicode 0.65 Spreadsheet::ParseExcel::Font 0.65 Spreadsheet::ParseExcel::Format 0.65 Spreadsheet::ParseExcel::SaveParser 0.65 Spreadsheet::ParseExcel::SaveParser::Workbook 0.65 Spreadsheet::ParseExcel::SaveParser::Worksheet 0.65 Spreadsheet::ParseExcel::Simple 1.04 Spreadsheet::ParseExcel::Utility 0.65 Spreadsheet::ParseExcel::Workbook 0.65 Spreadsheet::ParseExcel::Worksheet 0.65 Spreadsheet::WriteExcel 2.40 Spreadsheet::WriteExcel::BIFFwriter 2.40 Spreadsheet::WriteExcel::Big 2.40 Spreadsheet::WriteExcel::Chart 2.40 Spreadsheet::WriteExcel::Chart::Area 2.40 Spreadsheet::WriteExcel::Chart::Bar 2.40 Spreadsheet::WriteExcel::Chart::Column 2.40 Spreadsheet::WriteExcel::Chart::External 2.40 Spreadsheet::WriteExcel::Chart::Line 2.40 Spreadsheet::WriteExcel::Chart::Pie 2.40 Spreadsheet::WriteExcel::Chart::Scatter 2.40 Spreadsheet::WriteExcel::Chart::Stock 2.40 Spreadsheet::WriteExcel::Examples 2.40 Spreadsheet::WriteExcel::Format 2.40 Spreadsheet::WriteExcel::Formula 2.40 Spreadsheet::WriteExcel::OLEwriter 2.40 Spreadsheet::WriteExcel::Properties 2.40 Spreadsheet::WriteExcel::Simple 1.04 Spreadsheet::WriteExcel::Utility 2.40 Spreadsheet::WriteExcel::Workbook 2.40 Spreadsheet::WriteExcel::Worksheet 2.40 SQL::Abstract 1.86 SQL::Abstract::Test unknown SQL::Abstract::Tree unknown SQL::Translator 0.11024 SQL::Translator::Diff unknown SQL::Translator::Filter::DefaultExtra 1.59 SQL::Translator::Filter::Globals 1.59 SQL::Translator::Filter::Names 1.59 SQL::Translator::Generator::DDL::MySQL unknown SQL::Translator::Generator::DDL::PostgreSQL unknown SQL::Translator::Generator::DDL::SQLite unknown SQL::Translator::Generator::DDL::SQLServer unknown SQL::Translator::Generator::Role::DDL unknown SQL::Translator::Generator::Role::Quote unknown SQL::Translator::Parser 1.60 SQL::Translator::Parser::Access 1.59 SQL::Translator::Parser::DB2 unknown SQL::Translator::Parser::DB2::Grammar unknown SQL::Translator::Parser::DBI 1.59 SQL::Translator::Parser::DBI::DB2 1.59 SQL::Translator::Parser::DBI::MySQL 1.59 SQL::Translator::Parser::DBI::Oracle 1.59 
SQL::Translator::Parser::DBI::PostgreSQL 1.59 SQL::Translator::Parser::DBI::SQLite 1.59 SQL::Translator::Parser::DBI::SQLServer 1.59 SQL::Translator::Parser::DBI::Sybase 1.59 SQL::Translator::Parser::DBIx::Class 1.10 SQL::Translator::Parser::Excel 1.59 SQL::Translator::Parser::JSON 1.00 SQL::Translator::Parser::MySQL 1.59 SQL::Translator::Parser::Oracle 1.59 SQL::Translator::Parser::PostgreSQL 1.59 SQL::Translator::Parser::SQLite 1.59 SQL::Translator::Parser::SQLServer 1.59 SQL::Translator::Parser::Storable 1.59 SQL::Translator::Parser::Sybase 1.59 SQL::Translator::Parser::XML 1.59 SQL::Translator::Parser::XML::SQLFairy 1.59 SQL::Translator::Parser::xSV 1.59 SQL::Translator::Parser::YAML 1.59 SQL::Translator::Producer 1.59 SQL::Translator::Producer::ClassDBI 1.59 SQL::Translator::Producer::DB2 1.59 SQL::Translator::Producer::DBIx::Class::File 0.1 SQL::Translator::Producer::Diagram 1.59 SQL::Translator::Producer::DiaUml 1.59 SQL::Translator::Producer::Dumper 1.59 SQL::Translator::Producer::GraphViz 1.59 SQL::Translator::Producer::HTML 1.59 SQL::Translator::Producer::JSON 1.00 SQL::Translator::Producer::Latex 1.59 SQL::Translator::Producer::MySQL 1.59 SQL::Translator::Producer::Oracle 1.59 SQL::Translator::Producer::POD 1.59 SQL::Translator::Producer::PostgreSQL 1.59 SQL::Translator::Producer::SQLite 1.59 SQL::Translator::Producer::SQLServer 1.59 SQL::Translator::Producer::Storable 1.59 SQL::Translator::Producer::Sybase 1.59 SQL::Translator::Producer::TT::Base 1.59 SQL::Translator::Producer::TT::Table 1.59 SQL::Translator::Producer::TTSchema 1.59 SQL::Translator::Producer::XML 1.59 SQL::Translator::Producer::XML::SQLFairy 1.59 SQL::Translator::Producer::YAML 1.59 SQL::Translator::Role::BuildArgs unknown SQL::Translator::Role::Debug unknown SQL::Translator::Role::Error unknown SQL::Translator::Role::ListAttr unknown SQL::Translator::Schema 1.59 SQL::Translator::Schema::Constants 1.59 SQL::Translator::Schema::Constraint 1.59 SQL::Translator::Schema::Field 1.59 SQL::Translator::Schema::Index 1.59 SQL::Translator::Schema::Object 1.59 SQL::Translator::Schema::Procedure 1.59 SQL::Translator::Schema::Role::Compare unknown SQL::Translator::Schema::Role::Extra unknown SQL::Translator::Schema::Table 1.59 SQL::Translator::Schema::Trigger 1.59 SQL::Translator::Schema::View 1.59 SQL::Translator::Types unknown SQL::Translator::Utils 1.59 StackTrace::Auto 0.200013 Starman 0.4014 Starman::Server unknown Statistics::ANOVA 0.14 Statistics::ANOVA::Compare 0.01 Statistics::ANOVA::EffectSize 0.02 Statistics::ANOVA::Friedman 0.02 Statistics::ANOVA::JT unknown Statistics::ANOVA::KW 0.01 Statistics::ANOVA::Page 0.02 Statistics::Basic 1.6611 Statistics::Basic::_OneVectorBase unknown Statistics::Basic::_TwoVectorBase unknown Statistics::Basic::ComputedVector unknown Statistics::Basic::Correlation unknown Statistics::Basic::Covariance unknown Statistics::Basic::LeastSquareFit unknown Statistics::Basic::Mean unknown Statistics::Basic::Median unknown Statistics::Basic::Mode unknown Statistics::Basic::StdDev unknown Statistics::Basic::Variance unknown Statistics::Basic::Vector unknown Statistics::Candidates unknown Statistics::ChiSquare 1.0000 Statistics::Contingency 0.09 Statistics::Cook 0.0.6 Statistics::Data 0.11 Statistics::Data::Dichotomize 0.05 Statistics::Data::Rank 0.02 Statistics::DependantTTest 0.03 Statistics::Descriptive 3.0701 Statistics::Descriptive::Discrete 0.07 Statistics::Descriptive::Full 3.0701 Statistics::Descriptive::LogScale 0.11 Statistics::Descriptive::Smoother 3.0701 
Statistics::Descriptive::Smoother::Exponential 3.0701 Statistics::Descriptive::Smoother::Weightedexponential 3.0701 Statistics::Descriptive::Sparse 3.0701 Statistics::Discrete 0.05.00 Statistics::Distributions 1.02 Statistics::Distributions::Bartlett unknown Statistics::Distributions::GTest unknown Statistics::Diversity::Shannon 0.0102 Statistics::FactorAnalysis unknown Statistics::FisherPitman 0.034 Statistics::Frequency 0.04 Statistics::Histogram 0.1 Statistics::KruskalWallis 0.01 Statistics::Lite 3.62 Statistics::MaxEntropy 1.0 Statistics::Normality 0.01 Statistics::PCA unknown Statistics::PCA::Varimax unknown Statistics::PointEstimation 1.1 Statistics::R 0.34 Statistics::R::Legacy unknown Statistics::R::Win32 unknown Statistics::RankCorrelation 0.1205 Statistics::Robust 0.02 Statistics::Robust::Bootstrap unknown Statistics::Robust::Density unknown Statistics::Robust::Location unknown Statistics::Robust::Scale unknown Statistics::Sampler::Multinomial 0.7 Statistics::Sampler::Multinomial::AliasMethod 0.7 Statistics::Sequences 0.15 Statistics::Sequences::Joins 0.20 Statistics::Sequences::Pot 0.12 Statistics::Sequences::Runs 0.22 Statistics::Sequences::Turns 0.13 Statistics::Sequences::Vnomes 0.20 Statistics::Shannon 0.05 Statistics::Simpson 0.03 Statistics::SparseVector 0.2 Statistics::Standard_Normal unknown Statistics::TopK 0.02 Statistics::TTest 1.1 Statistics::Zed 0.10 Storable 3.11 Stream::Buffered 0.03 Stream::Buffered::Auto unknown Stream::Buffered::File unknown Stream::Buffered::PerlIO unknown strictures 2.000005 strictures::extra unknown String::Diff 0.07 String::Errf 0.008 String::Escape 2010.002 String::Flogger 1.101245 String::Format 1.18 String::Formatter 0.102084 String::Formatter::Cookbook 0.102084 String::Numeric 0.9 String::Numeric::PP 0.9 String::Print 0.93 String::RewritePrefix 0.007 String::ShellQuote 1.04 String::Tagged 0.15 String::Tagged::Terminal 0.02 String::ToIdentifier::EN 0.12 String::ToIdentifier::EN::Unicode 0.12 String::Truncate 1.100602 String::Util 1.26 Struct::Dumb 0.09 Sub::Attribute 0.06 Sub::Defer 2.001001 Sub::Exporter 0.987 Sub::Exporter::ForMethods 0.100052 Sub::Exporter::GlobExporter 0.005 Sub::Exporter::Progressive 0.001013 Sub::Exporter::Util 0.987 Sub::Identify 0.14 Sub::Info 0.002 Sub::Install 0.928 Sub::Name 0.21 Sub::Quote 2.001001 Sub::Uplevel 0.2800 Sub::Util 1.50 SUPER 1.20141117 SVG 2.84 SVG::DOM 2.84 SVG::Element 2.84 SVG::Extension 2.84 SVG::Graph 0.02 SVG::Graph::Data unknown SVG::Graph::Data::Datum unknown SVG::Graph::Data::Node unknown SVG::Graph::Data::Tree unknown SVG::Graph::File unknown SVG::Graph::Frame unknown SVG::Graph::Glyph unknown SVG::Graph::Glyph::axis unknown SVG::Graph::Glyph::bar unknown SVG::Graph::Glyph::barflex unknown SVG::Graph::Glyph::bezier unknown SVG::Graph::Glyph::bubble unknown SVG::Graph::Glyph::heatmap unknown SVG::Graph::Glyph::line unknown SVG::Graph::Glyph::pictogram unknown SVG::Graph::Glyph::scatter unknown SVG::Graph::Glyph::tree unknown SVG::Graph::Glyph::wedge unknown SVG::Graph::Group unknown SVG::XML 2.84 Symbol::Util 0.0203 SymTab unknown syntax 0.004 Syntax::Feature::Junction 0.003008 Syntax::Keyword::Junction 0.003008 Syntax::Keyword::Junction::All 0.003008 Syntax::Keyword::Junction::Any 0.003008 Syntax::Keyword::Junction::Base 0.003008 Syntax::Keyword::Junction::None 0.003008 Syntax::Keyword::Junction::One 0.003008 Sys::SigAction 0.23 Sys::SigAction::Alarm unknown TAP::Base 3.42 TAP::Formatter::Base 3.42 TAP::Formatter::Color 3.42 TAP::Formatter::Console 3.42 
TAP::Formatter::Console::ParallelSession 3.42 TAP::Formatter::Console::Session 3.42 TAP::Formatter::File 3.42 TAP::Formatter::File::Session 3.42 TAP::Formatter::Session 3.42 TAP::Harness 3.42 TAP::Harness::Env 3.42 TAP::Object 3.42 TAP::Parser 3.42 TAP::Parser::Aggregator 3.42 TAP::Parser::Grammar 3.42 TAP::Parser::Iterator 3.42 TAP::Parser::Iterator::Array 3.42 TAP::Parser::Iterator::Process 3.42 TAP::Parser::Iterator::Stream 3.42 TAP::Parser::IteratorFactory 3.42 TAP::Parser::Multiplexer 3.42 TAP::Parser::Result 3.42 TAP::Parser::Result::Bailout 3.42 TAP::Parser::Result::Comment 3.42 TAP::Parser::Result::Plan 3.42 TAP::Parser::Result::Pragma 3.42 TAP::Parser::Result::Test 3.42 TAP::Parser::Result::Unknown 3.42 TAP::Parser::Result::Version 3.42 TAP::Parser::Result::YAML 3.42 TAP::Parser::ResultFactory 3.42 TAP::Parser::Scheduler 3.42 TAP::Parser::Scheduler::Job 3.42 TAP::Parser::Scheduler::Spinner 3.42 TAP::Parser::Source 3.42 TAP::Parser::SourceHandler 3.42 TAP::Parser::SourceHandler::Executable 3.42 TAP::Parser::SourceHandler::File 3.42 TAP::Parser::SourceHandler::Handle 3.42 TAP::Parser::SourceHandler::Perl 3.42 TAP::Parser::SourceHandler::RawTAP 3.42 TAP::Parser::YAMLish::Reader 3.42 TAP::Parser::YAMLish::Writer 3.42 Task::Catalyst 4.02 Task::Kensho 0.39 Task::Kensho::Async 0.39 Task::Kensho::CLI 0.39 Task::Kensho::Config 0.39 Task::Kensho::Dates 0.39 Task::Kensho::DBDev 0.39 Task::Kensho::Email 0.39 Task::Kensho::ExcelCSV 0.39 Task::Kensho::Exceptions 0.39 Task::Kensho::Hackery 0.39 Task::Kensho::Logging 0.39 Task::Kensho::ModuleDev 0.39 Task::Kensho::OOP 0.39 Task::Kensho::Scalability 0.39 Task::Kensho::Testing 0.39 Task::Kensho::Toolchain 0.39 Task::Kensho::WebCrawling 0.39 Task::Kensho::WebDev 0.39 Task::Kensho::XML 0.39 Task::Moose 0.03 Task::Weaken 1.06 Template 2.27 Template::Base 2.78 Template::Config 2.75 Template::Constants 2.75 Template::Context 2.98 Template::Directive 2.2 Template::Document 2.79 Template::Exception 2.7 Template::Filters 2.87 Template::Grammar 2.26 Template::Iterator 2.68 Template::Namespace::Constants 1.27 Template::Parser 2.89 Template::Plugin 2.7 Template::Plugin::Assert 1 Template::Plugin::CGI 2.7 Template::Plugin::Datafile 2.72 Template::Plugin::Date 2.78 Template::Plugin::Directory 2.7 Template::Plugin::Dumper 2.7 Template::Plugin::File 2.71 Template::Plugin::Filter 1.38 Template::Plugin::Format 2.7 Template::Plugin::HTML 2.62 Template::Plugin::Image 1.21 Template::Plugin::Iterator 2.68 Template::Plugin::Math 1.16 Template::Plugin::Pod 2.69 Template::Plugin::Procedural 1.17 Template::Plugin::Scalar 1 Template::Plugin::String 2.4 Template::Plugin::Table 2.71 Template::Plugin::URL 2.74 Template::Plugin::View 2.68 Template::Plugin::Wrap 2.68 Template::Plugins 2.77 Template::Provider 2.94 Template::Service 2.8 Template::Stash 2.91 Template::Stash::Context 1.63 Template::Stash::XS unknown Template::Test 2.75 Template::Timer 1.00 Template::Tiny 1.12 Template::Toolkit unknown Template::View 2.91 Template::VMethods 2.16 Term::Encoding 0.02 Term::ProgressBar 2.22 Term::ProgressBar::IO 2.22 Term::ProgressBar::Quiet 0.31 Term::ProgressBar::Simple 0.03 Term::ReadKey 2.37 Term::ReadLine::Perl5 1.45 Term::ReadLine::Perl5::Common unknown Term::ReadLine::Perl5::Dumb unknown Term::ReadLine::Perl5::History unknown Term::ReadLine::Perl5::Keymap unknown Term::ReadLine::Perl5::OO 0.43 Term::ReadLine::Perl5::OO::History unknown Term::ReadLine::Perl5::OO::Keymap unknown Term::ReadLine::Perl5::OO::State unknown Term::ReadLine::Perl5::readline 1.45 
Term::ReadLine::Perl5::TermCap unknown Term::ReadLine::Perl5::Tie 1.45 Term::Size 0.207 Term::Table 0.012 Term::Table::Cell 0.012 Term::Table::CellStack 0.012 Term::Table::HashBase 0.003 Term::Table::LineBreak 0.012 Term::Table::Spacer 0.012 Term::Table::Util 0.012 Term::UI 0.46 Term::UI::History 0.46 Test2 1.302138 Test2::API 1.302138 Test2::API::Breakage 1.302138 Test2::API::Context 1.302138 Test2::API::Instance 1.302138 Test2::API::Stack 1.302138 Test2::AsyncSubtest 0.000115 Test2::AsyncSubtest::Event::Attach 0.000115 Test2::AsyncSubtest::Event::Detach 0.000115 Test2::AsyncSubtest::Formatter 0.000115 Test2::AsyncSubtest::Hub 0.000115 Test2::Bundle 0.000115 Test2::Bundle::Extended 0.000115 Test2::Bundle::More 0.000115 Test2::Bundle::Simple 0.000115 Test2::Compare 0.000115 Test2::Compare::Array 0.000115 Test2::Compare::Bag 0.000115 Test2::Compare::Base 0.000115 Test2::Compare::Bool 0.000115 Test2::Compare::Custom 0.000115 Test2::Compare::DeepRef 0.000115 Test2::Compare::Delta 0.000115 Test2::Compare::Event 0.000115 Test2::Compare::EventMeta 0.000115 Test2::Compare::Float 0.000115 Test2::Compare::Hash 0.000115 Test2::Compare::Meta 0.000115 Test2::Compare::Negatable 0.000115 Test2::Compare::Number 0.000115 Test2::Compare::Object 0.000115 Test2::Compare::OrderedSubset 0.000115 Test2::Compare::Pattern 0.000115 Test2::Compare::Ref 0.000115 Test2::Compare::Regex 0.000115 Test2::Compare::Scalar 0.000115 Test2::Compare::Set 0.000115 Test2::Compare::String 0.000115 Test2::Compare::Undef 0.000115 Test2::Compare::Wildcard 0.000115 Test2::Event 1.302138 Test2::Event::Bail 1.302138 Test2::Event::Diag 1.302138 Test2::Event::Encoding 1.302138 Test2::Event::Exception 1.302138 Test2::Event::Fail 1.302138 Test2::Event::Generic 1.302138 Test2::Event::Note 1.302138 Test2::Event::Ok 1.302138 Test2::Event::Pass 1.302138 Test2::Event::Plan 1.302138 Test2::Event::Skip 1.302138 Test2::Event::Subtest 1.302138 Test2::Event::TAP::Version 1.302138 Test2::Event::Times 0.000115 Test2::Event::V2 1.302138 Test2::Event::Waiting 1.302138 Test2::Event::Warning 0.06 Test2::EventFacet 1.302138 Test2::EventFacet::About 1.302138 Test2::EventFacet::Amnesty 1.302138 Test2::EventFacet::Assert 1.302138 Test2::EventFacet::Control 1.302138 Test2::EventFacet::Error 1.302138 Test2::EventFacet::Hub 1.302138 Test2::EventFacet::Info 1.302138 Test2::EventFacet::Meta 1.302138 Test2::EventFacet::Parent 1.302138 Test2::EventFacet::Plan 1.302138 Test2::EventFacet::Render 1.302138 Test2::EventFacet::Trace 1.302138 Test2::Formatter 1.302138 Test2::Formatter::TAP 1.302138 Test2::Hub 1.302138 Test2::Hub::Interceptor 1.302138 Test2::Hub::Interceptor::Terminator 1.302138 Test2::Hub::Subtest 1.302138 Test2::IPC 1.302138 Test2::IPC::Driver 1.302138 Test2::IPC::Driver::Files 1.302138 Test2::Manual 0.000115 Test2::Manual::Anatomy 0.000115 Test2::Manual::Anatomy::API 0.000115 Test2::Manual::Anatomy::Context 0.000115 Test2::Manual::Anatomy::EndToEnd 0.000115 Test2::Manual::Anatomy::Event 0.000115 Test2::Manual::Anatomy::Hubs 0.000115 Test2::Manual::Anatomy::IPC 0.000115 Test2::Manual::Anatomy::Utilities 0.000115 Test2::Manual::Contributing 0.000115 Test2::Manual::Testing 0.000115 Test2::Manual::Testing::Introduction 0.000115 Test2::Manual::Testing::Migrating 0.000115 Test2::Manual::Testing::Planning 0.000115 Test2::Manual::Testing::Todo 0.000115 Test2::Manual::Tooling 0.000115 Test2::Manual::Tooling::FirstTool 0.000115 Test2::Manual::Tooling::Formatter 0.000115 Test2::Manual::Tooling::Nesting 0.000115 Test2::Manual::Tooling::Plugin::TestExit 0.000115 
Test2::Manual::Tooling::Plugin::TestingDone 0.000115 Test2::Manual::Tooling::Plugin::ToolCompletes 0.000115 Test2::Manual::Tooling::Plugin::ToolStarts 0.000115 Test2::Manual::Tooling::Subtest 0.000115 Test2::Manual::Tooling::TestBuilder 0.000115 Test2::Manual::Tooling::Testing 0.000115 Test2::Mock 0.000115 Test2::Plugin 0.000115 Test2::Plugin::BailOnFail 0.000115 Test2::Plugin::DieOnFail 0.000115 Test2::Plugin::ExitSummary 0.000115 Test2::Plugin::NoWarnings 0.06 Test2::Plugin::SRand 0.000115 Test2::Plugin::Times 0.000115 Test2::Plugin::UTF8 0.000115 Test2::Require 0.000115 Test2::Require::AuthorTesting 0.000115 Test2::Require::EnvVar 0.000115 Test2::Require::Fork 0.000115 Test2::Require::Module 0.000115 Test2::Require::Perl 0.000115 Test2::Require::RealFork 0.000115 Test2::Require::Threads 0.000115 Test2::Suite 0.000115 Test2::Todo 0.000115 Test2::Tools 0.000115 Test2::Tools::AsyncSubtest 0.000115 Test2::Tools::Basic 0.000115 Test2::Tools::Class 0.000115 Test2::Tools::ClassicCompare 0.000115 Test2::Tools::Compare 0.000115 Test2::Tools::Defer 0.000115 Test2::Tools::Encoding 0.000115 Test2::Tools::Event 0.000115 Test2::Tools::Exception 0.000115 Test2::Tools::Exports 0.000115 Test2::Tools::GenTemp 0.000115 Test2::Tools::Grab 0.000115 Test2::Tools::Mock 0.000115 Test2::Tools::Ref 0.000115 Test2::Tools::Spec 0.000115 Test2::Tools::Subtest 0.000115 Test2::Tools::Target 0.000115 Test2::Tools::Tester 0.000115 Test2::Tools::Tiny 1.302138 Test2::Tools::Warnings 0.000115 Test2::Util 1.302138 Test2::Util::ExternalMeta 1.302138 Test2::Util::Facets2Legacy 1.302138 Test2::Util::Grabber 0.000115 Test2::Util::HashBase 1.302138 Test2::Util::Ref 0.000115 Test2::Util::Stash 0.000115 Test2::Util::Sub 0.000115 Test2::Util::Table 0.000115 Test2::Util::Table::Cell 0.000115 Test2::Util::Table::LineBreak 0.000115 Test2::Util::Term 0.000115 Test2::Util::Times 0.000115 Test2::Util::Trace 1.302138 Test2::V0 0.000115 Test2::Workflow 0.000115 Test2::Workflow::BlockBase 0.000115 Test2::Workflow::Build 0.000115 Test2::Workflow::Runner 0.000115 Test2::Workflow::Task 0.000115 Test2::Workflow::Task::Action 0.000115 Test2::Workflow::Task::Group 0.000115 Test::Assert 0.0504 Test::Base 0.89 Test::Base::Filter unknown Test::Builder 1.302138 Test::Builder::Formatter 1.302138 Test::Builder::IO::Scalar 2.114 Test::Builder::Module 1.302138 Test::Builder::Tester 1.302138 Test::Builder::Tester::Color 1.302138 Test::Builder::TodoDiag 1.302138 Test::Class 0.50 Test::Class::Load 0.50 Test::Class::MethodInfo 0.50 Test::Class::Moose 0.92 Test::Class::Moose::AttributeRegistry 0.92 Test::Class::Moose::CLI 0.92 Test::Class::Moose::Config 0.92 Test::Class::Moose::Deprecated 0.92 Test::Class::Moose::Executor::Parallel 0.92 Test::Class::Moose::Executor::Sequential 0.92 Test::Class::Moose::Load 0.92 Test::Class::Moose::Report 0.92 Test::Class::Moose::Report::Class 0.92 Test::Class::Moose::Report::Instance 0.92 Test::Class::Moose::Report::Method 0.92 Test::Class::Moose::Report::Time 0.92 Test::Class::Moose::Role 0.92 Test::Class::Moose::Role::AutoUse 0.92 Test::Class::Moose::Role::CLI 0.92 Test::Class::Moose::Role::Executor 0.92 Test::Class::Moose::Role::HasTimeReport 0.92 Test::Class::Moose::Role::ParameterizedInstances 0.92 Test::Class::Moose::Role::Reporting 0.92 Test::Class::Moose::Runner 0.92 Test::Class::Moose::Tutorial 0.92 Test::Class::Moose::Util 0.92 Test::CleanNamespaces 0.23 Test::CPAN::Changes 0.400002 Test::CPAN::Meta 0.25 Test::CPAN::Meta::Version 0.25 Test::Deep 1.128 Test::Deep::All unknown Test::Deep::Any unknown 
Test::Deep::Array unknown Test::Deep::ArrayEach unknown Test::Deep::ArrayElementsOnly unknown Test::Deep::ArrayLength unknown Test::Deep::ArrayLengthOnly unknown Test::Deep::Blessed unknown Test::Deep::Boolean unknown Test::Deep::Cache unknown Test::Deep::Cache::Simple unknown Test::Deep::Class unknown Test::Deep::Cmp unknown Test::Deep::Code unknown Test::Deep::Hash unknown Test::Deep::HashEach unknown Test::Deep::HashElements unknown Test::Deep::HashKeys unknown Test::Deep::HashKeysOnly unknown Test::Deep::Ignore unknown Test::Deep::Isa unknown Test::Deep::JSON 0.05 Test::Deep::ListMethods unknown Test::Deep::Methods unknown Test::Deep::MM unknown Test::Deep::None unknown Test::Deep::NoTest unknown Test::Deep::Number unknown Test::Deep::Obj unknown Test::Deep::Ref unknown Test::Deep::RefType unknown Test::Deep::Regexp unknown Test::Deep::RegexpMatches unknown Test::Deep::RegexpOnly unknown Test::Deep::RegexpRef unknown Test::Deep::RegexpRefOnly unknown Test::Deep::RegexpVersion unknown Test::Deep::ScalarRef unknown Test::Deep::ScalarRefOnly unknown Test::Deep::Set unknown Test::Deep::Shallow unknown Test::Deep::Stack unknown Test::Deep::String unknown Test::Deep::Type 0.008 Test::Differences 0.64 Test::EOL 2.00 Test::Exception 0.43 Test::Expect 0.34 Test::FailWarnings 0.008 Test::Fatal 0.014 Test::File 1.443 Test::File::ShareDir 1.001002 Test::File::ShareDir::Dist 1.001002 Test::File::ShareDir::Module 1.001002 Test::File::ShareDir::Object::Dist 1.001002 Test::File::ShareDir::Object::Inc 1.001002 Test::File::ShareDir::Object::Module 1.001002 Test::File::ShareDir::TempDirObject 1.001002 Test::File::ShareDir::Utils 1.001002 Test::Fork 0.02 Test::Future 0.38 Test::Harness 3.42 Test::Identity 0.01 Test::JSON 0.11 Test::LeakTrace 0.16 Test::LeakTrace::Script unknown Test::LongString 0.17 Test::LWP::UserAgent 0.033 Test::Memory::Cycle 1.06 Test::Mock::HTTP::Request 0.01 Test::Mock::HTTP::Response 0.01 Test::Mock::LWP 0.08 Test::Mock::LWP::UserAgent 0.01 Test::MockModule 0.15 Test::MockObject 1.20180705 Test::MockObject::Extends 1.20180705 Test::MockTime 0.17 Test::Mojo unknown Test::Moose 2.2011 Test::MooseX::Daemonize 0.21 Test::More 1.302138 Test::More::UTF8 0.05 Test::Most 0.35 Test::Most::Exception 0.35 Test::Needs 0.002005 Test::NoWarnings 1.04 Test::NoWarnings::Warning 1.04 Test::Number::Delta 1.06 Test::Object 0.08 Test::Object::Test 0.08 Test::Output 1.031 Test::Perl::Critic::Policy 1.132 Test::Pod 1.52 Test::Pod::Content unknown Test::Pod::Coverage 1.10 Test::RDF::Trine::Store 1.019 Test::Refcount 0.08 Test::Requires 0.10 Test::RequiresInternet 0.05 Test::SharedFork 0.35 Test::SharedFork::Array unknown Test::SharedFork::Scalar unknown Test::SharedFork::Store unknown Test::Simple 1.302138 Test::Spec 0.54 Test::Spec::Context unknown Test::Spec::Example unknown Test::Spec::Mocks unknown Test::Spec::SharedHash unknown Test::Spec::TodoExample unknown Test::Specio 0.42 Test::SQL::Translator 1.59 Test::SubCalls 1.10 Test::TCP 2.19 Test::TCP::CheckPort unknown Test::TempDir::Tiny 0.018 Test::Tester 1.302138 Test::Tester::Capture 1.302138 Test::Tester::CaptureRunner 1.302138 Test::Tester::Delegate 1.302138 Test::Time 0.06 Test::Toolbox 0.4 Test::Trap unknown Test::Trap::Builder unknown Test::Trap::Builder::PerlIO unknown Test::Trap::Builder::SystemSafe unknown Test::Trap::Builder::TempFile unknown Test::TypeTiny 1.002002 Test::Unit::Lite 0.1202 Test::use::ok 1.302138 Test::utf8 1.01 Test::Warn 0.36 Test::Warnings 0.026 Test::Without::Module 0.20 Test::WWW::Mechanize 1.50 
Test::WWW::Mechanize::Catalyst 0.60 Test::WWW::Mechanize::PSGI 0.38 Test::WWW::Selenium 1.36 Test::YAML 1.07 Text::Aligner 0.13 Text::Autoformat 1.74 Text::Autoformat::Hang 1.74 Text::Autoformat::NullHang 1.74 Text::CSV 1.95 Text::CSV_PP 1.95 Text::CSV_XS 1.36 Text::Diff 1.45 Text::Diff::Config 1.44 Text::Diff::Table 1.44 Text::Format 0.61 Text::German 0.06 Text::German::Adjektiv unknown Text::German::Ausnahme unknown Text::German::Cache unknown Text::German::Endung unknown Text::German::Regel unknown Text::German::Util unknown Text::German::Verb unknown Text::German::Vorsilbe unknown Text::Glob 0.11 Text::LineFold 2016.00702 Text::Reform 1.20 Text::SimpleTable 2.05 Text::Table 1.133 Text::Template 1.53 Text::Template::Preprocess 1.53 Text::Unidecode 1.30 Text::VisualWidth::PP 0.05 threads::shared::array 0.36 threads::shared::handle 0.36 threads::shared::hash 0.36 threads::shared::scalar 0.36 Throwable 0.200013 Throwable::Error 0.200013 Tie::Handle::Offset 0.004 Tie::Handle::SkipHeader 0.004 Tie::Hash::MultiValue 1.05 Tie::IxHash 1.23 Tie::ToObject 0.03 Tie::Watch 1.302 Time::CTime 2011.0505 Time::DaysInMonth 99.1117 Time::Duration 1.20 Time::Duration::Parse 0.14 Time::HiRes 1.9758 Time::JulianDay 2011.0505 Time::ParseDate 2015.103 Time::Piece 1.3204 Time::Seconds 1.3204 Time::Timezone 2015.0925 Time::Tiny 1.08 Time::Zone 2.24 Tk 804.034 Tk::Adjuster 4.008 Tk::After 4.008 Tk::Animation 4.008 Tk::Balloon 4.012 Tk::Bitmap 4.004 Tk::BrowseEntry 4.015 Tk::Button 4.010 Tk::Canvas 4.013 Tk::Checkbutton 4.006 Tk::Clipboard 4.009 Tk::CmdLine 4.007 Tk::ColorDialog 4.014 Tk::ColorEditor 4.014 Tk::ColorSelect 4.014 Tk::Compound 4.004 Tk::Config 804.034 Tk::Configure 4.009 Tk::Derived 4.011 Tk::Dialog 4.005 Tk::DialogBox 4.016 Tk::Dirlist 4.004 Tk::DirTree 4.022 Tk::DragDrop 4.015 Tk::DragDrop::Common 4.005 Tk::DragDrop::Local 4.004 Tk::DragDrop::Rect 4.012 Tk::DragDrop::SunConst 4.004 Tk::DragDrop::SunDrop 4.006 Tk::DragDrop::SunSite 4.007 Tk::DragDrop::XDNDDrop 4.007 Tk::DragDrop::XDNDSite 4.007 Tk::DropSite 4.008 Tk::DummyEncode 4.007 Tk::English 4.006 Tk::Entry 4.018 Tk::ErrorDialog 4.007 Tk::Event 4.035 Tk::Event::IO 4.009 Tk::FBox 4.018 Tk::FileSelect 4.018 Tk::FloatEntry 4.004 Tk::Font 4.004 Tk::Frame 4.010 Tk::HList 4.015 Tk::IconList 4.007 Tk::Image 4.011 Tk::InputO 4.004 Tk::install 4.004 Tk::IO 4.006 Tk::ItemStyle 4.004 Tk::JPEG 4.003 Tk::Label 4.006 Tk::LabeledEntryLabeledRadiobutton 4.004 Tk::Labelframe 4.003 Tk::LabEntry 4.006 Tk::LabFrame 4.010 Tk::LabRadiobutton 4.004 Tk::Listbox 4.015 Tk::MainWindow 4.015 Tk::MakeDepend 4.015 Tk::Menu 4.023 Tk::Menu::Item 4.005 Tk::Menubar 4.006 Tk::Menubutton 4.005 Tk::Message 4.006 Tk::MMtry 4.009 Tk::MMutil 4.026 Tk::MsgBox 4.002 Tk::Mwm 4.004 Tk::NBFrame 4.004 Tk::NoteBook 4.009 Tk::Optionmenu 4.014 Tk::Pane 4.007 Tk::Panedwindow 4.004 Tk::Photo 4.006 Tk::Pixmap 4.004 Tk::PNG 4.004 Tk::Pretty 4.006 Tk::ProgressBar 4.015 Tk::Radiobutton 4.006 Tk::Region 4.006 Tk::Reindex 4.006 Tk::ReindexedROText 4.004 Tk::ReindexedText 4.004 Tk::ROText 4.010 Tk::Scale 4.004 Tk::Scrollbar 4.010 Tk::Spinbox 4.007 Tk::Stats 4.004 Tk::Submethods 4.005 Tk::Table 4.016 Tk::Text 4.024 Tk::Text::Tag 4.004 Tk::TextEdit 4.004 Tk::TextList 4.006 Tk::TextUndo 4.015 Tk::Tiler 4.012 Tk::TixGrid 4.010 Tk::TList 4.006 Tk::Toplevel 4.006 Tk::Trace 4.009 Tk::Tree 4.72 Tk::Widget 4.036 Tk::widgets 4.005 Tk::WinPhoto 4.005 Tk::Wm 4.015 Tk::X 4.005 Tk::X11Font 4.007 Tk::Xlib 4.004 Tk::Xrm 4.005 Tree::DAG_Node 1.31 Tree::Simple 1.33 Tree::Simple::Visitor 1.33 
Tree::Simple::Visitor::BreadthFirstTraversal 0.15 Tree::Simple::Visitor::CreateDirectoryTree 0.15 Tree::Simple::Visitor::FindByNodeValue 0.15 Tree::Simple::Visitor::FindByPath 0.15 Tree::Simple::Visitor::FindByUID 0.15 Tree::Simple::Visitor::FromNestedArray 0.15 Tree::Simple::Visitor::FromNestedHash 0.15 Tree::Simple::Visitor::GetAllDescendents 0.15 Tree::Simple::Visitor::LoadClassHierarchy 0.15 Tree::Simple::Visitor::LoadDirectoryTree 0.15 Tree::Simple::Visitor::PathToRoot 0.15 Tree::Simple::Visitor::PostOrderTraversal 0.15 Tree::Simple::Visitor::PreOrderTraversal 0.15 Tree::Simple::Visitor::Sort 0.15 Tree::Simple::Visitor::ToNestedArray 0.15 Tree::Simple::Visitor::ToNestedHash 0.15 Tree::Simple::Visitor::VariableDepthClone 0.15 Tree::Simple::VisitorFactory 0.15 Try::Tiny 0.30 TryCatch 1.003002 Type::Coercion 1.002002 Type::Coercion::FromMoose 1.002002 Type::Coercion::Union 1.002002 Type::Library 1.002002 Type::Params 1.002002 Type::Parser 1.002002 Type::Registry 1.002002 Type::Tiny 1.002002 Type::Tiny::_HalfOp 1.002002 Type::Tiny::Class 1.002002 Type::Tiny::Duck 1.002002 Type::Tiny::Enum 1.002002 Type::Tiny::Intersection 1.002002 Type::Tiny::Role 1.002002 Type::Tiny::Union 1.002002 Type::Utils 1.002002 Types::Common::Numeric 1.002002 Types::Common::String 1.002002 Types::Serialiser 1.0 Types::Standard 1.002002 Types::Standard::ArrayRef 1.002002 Types::Standard::CycleTuple 1.002002 Types::Standard::Dict 1.002002 Types::Standard::HashRef 1.002002 Types::Standard::Map 1.002002 Types::Standard::ScalarRef 1.002002 Types::Standard::Tuple 1.002002 Types::TypeTiny 1.002002 Unicode::CharName 0.00 Unicode::EastAsianWidth 1.33 Unicode::EastAsianWidth::Detect 0.03 Unicode::GCString 2013.10 Unicode::LineBreak 2018.003 Unicode::LineBreak unknown Unicode::Map 0.112 Unicode::Map8 0.13 Unicode::String 2.10 UNIVERSAL::can 1.20140328 UNIVERSAL::isa 1.20171012 UNIVERSAL::require 0.18 URI 1.74 URI::_foreign 1.74 URI::_generic 1.74 URI::_idna 1.74 URI::_ldap 1.74 URI::_login 1.74 URI::_punycode 1.74 URI::_query 1.74 URI::_segment 1.74 URI::_server 1.74 URI::_userpass 1.74 URI::data 1.74 URI::Escape 3.31 URI::file 4.21 URI::file::Base 1.74 URI::file::FAT 1.74 URI::file::Mac 1.74 URI::file::OS2 1.74 URI::file::QNX 1.74 URI::file::Unix 1.74 URI::file::Win32 1.74 URI::Find 20160806 URI::Find::Schemeless 20160806 URI::ftp 1.74 URI::gopher 1.74 URI::Heuristic 4.20 URI::http 1.74 URI::https 1.74 URI::IRI 1.74 URI::ldap 1.74 URI::ldapi 1.74 URI::ldaps 1.74 URI::mailto 1.74 URI::mms 1.74 URI::news 1.74 URI::nntp 1.74 URI::pop 1.74 URI::QueryParam 1.74 URI::rlogin 1.74 URI::rsync 1.74 URI::rtsp 1.74 URI::rtspu 1.74 URI::sftp 1.74 URI::sip 1.74 URI::sips 1.74 URI::snews 1.74 URI::Split 1.74 URI::ssh 1.74 URI::telnet 1.74 URI::tn3270 1.74 URI::URL 5.04 URI::urn 1.74 URI::isbn 1.74 URI::oid 1.74 URI::WithBase 2.20 URI::ws 0.03 URI::wss 0.03 UUID::Tiny 1.04 Variable::Magic 0.62 Want 0.29 WidgetDemo 4.012 Win32::ShellQuote 0.003001 WWW::Form::UrlEncoded 0.24 WWW::Form::UrlEncoded::PP unknown WWW::Mechanize 1.88 WWW::Mechanize::Image 1.88 WWW::Mechanize::Link 1.88 WWW::Mechanize::TreeBuilder 1.20000 WWW::Pastebin::PastebinCom::Create 1.003 WWW::RobotRules 6.02 WWW::RobotRules::AnyDBM_File 6.00 WWW::Selenium 1.36 WWW::Selenium::Util 1.36 XML::Atom 0.42 XML::Atom::Base unknown XML::Atom::Category unknown XML::Atom::Client unknown XML::Atom::Content unknown XML::Atom::Entry unknown XML::Atom::ErrorHandler unknown XML::Atom::Feed unknown XML::Atom::Link unknown XML::Atom::Person unknown XML::Atom::Server unknown XML::Atom::Thing 
unknown XML::Atom::Util unknown XML::CommonNS 0.06 XML::Compile 1.60 XML::Compile::Iterator 1.60 XML::Compile::Schema 1.60 XML::Compile::Schema::BuiltInFacets 1.60 XML::Compile::Schema::BuiltInTypes 1.60 XML::Compile::Schema::Instance 1.60 XML::Compile::Schema::NameSpaces 1.60 XML::Compile::Schema::Specs 1.60 XML::Compile::Tester 0.91 XML::Compile::Translate 1.60 XML::Compile::Translate::Reader 1.60 XML::Compile::Translate::Template 1.60 XML::Compile::Translate::Writer 1.60 XML::Compile::Util 1.60 XML::DOM 1.46 XML::DOM::DOMException unknown XML::DOM::NamedNodeMap unknown XML::DOM::NodeList unknown XML::DOM::PerlSAX unknown XML::DOM::XPath 0.14 XML::ESISParser 0.08 XML::Filter::BufferText 1.01 XML::Generator::PerlData 0.95 XML::Handler::BuildDOM unknown XML::Handler::CanonXMLWriter 0.08 XML::Handler::Sample unknown XML::Handler::Subs 0.08 XML::Handler::XMLWriter 0.08 XML::LibXML 2.0132 XML::LibXML::AttributeHash 2.0132 XML::LibXML::Boolean 2.0132 XML::LibXML::Common 2.0132 XML::LibXML::Devel 2.0132 XML::LibXML::ErrNo 2.0132 XML::LibXML::Error 2.0132 XML::LibXML::Literal 2.0132 XML::LibXML::NodeList 2.0132 XML::LibXML::Number 2.0132 XML::LibXML::Reader 2.0132 XML::LibXML::SAX 2.0132 XML::LibXML::SAX::Builder 2.0132 XML::LibXML::SAX::Generator 2.0132 XML::LibXML::SAX::Parser 2.0132 XML::LibXML::Simple 0.99 XML::LibXML::XPathContext 2.0132 XML::LibXSLT 1.96 XML::Namespace 0.02 XML::NamespaceFactory 1.02 XML::NamespaceSupport 1.12 XML::Parser 2.44 XML::Parser::Expat 2.44 XML::Parser::Lite 0.721 XML::Parser::PerlSAX 0.08 XML::Parser::Style::Debug unknown XML::Parser::Style::Objects unknown XML::Parser::Style::Stream unknown XML::Parser::Style::Subs unknown XML::Parser::Style::Tree unknown XML::PatAct::ACTION unknown XML::PatAct::Amsterdam 0.08 XML::PatAct::MatchName 0.08 XML::PatAct::PATTERN unknown XML::PatAct::ToObjects 0.08 XML::Perl2SAX 0.08 XML::RegExp 0.04 XML::RSS 1.60 XML::RSS::Private::Output::Base 1.60 XML::RSS::Private::Output::Roles::ImageDims 1.60 XML::RSS::Private::Output::Roles::ModulesElems 1.60 XML::RSS::Private::Output::V0_9 1.60 XML::RSS::Private::Output::V0_91 1.60 XML::RSS::Private::Output::V1_0 1.60 XML::RSS::Private::Output::V2_0 1.60 XML::SAX 1.00 XML::SAX2Perl 0.08 XML::SAX::Base 1.09 XML::SAX::DocumentLocator unknown XML::SAX::Exception 1.09 XML::SAX::Expat 0.51 XML::SAX::ParserFactory 1.01 XML::SAX::PurePerl 1.00 XML::SAX::PurePerl unknown XML::SAX::PurePerl unknown XML::SAX::PurePerl unknown XML::SAX::PurePerl unknown XML::SAX::PurePerl unknown XML::SAX::PurePerl unknown XML::SAX::PurePerl::DebugHandler unknown XML::SAX::PurePerl::Exception unknown XML::SAX::PurePerl::Productions unknown XML::SAX::PurePerl::Reader unknown XML::SAX::PurePerl::Reader unknown XML::SAX::PurePerl::Reader unknown XML::SAX::PurePerl::Reader::Stream unknown XML::SAX::PurePerl::Reader::String unknown XML::SAX::PurePerl::Reader::URI unknown XML::SAX::Writer 0.57 XML::SAX::Writer::XML 0.57 XML::Simple 2.25 XML::Twig 3.52 XML::Twig::XPath 0.02 XML::Writer 0.625 XML::XPath 1.42 XML::XPath::Boolean 1.42 XML::XPath::Builder 1.42 XML::XPath::Expr 1.42 XML::XPath::Function 1.42 XML::XPath::Literal 1.42 XML::XPath::LocationPath 1.42 XML::XPath::Node 1.42 XML::XPath::Node::Attribute 1.42 XML::XPath::Node::Comment 1.42 XML::XPath::Node::Element 1.42 XML::XPath::Node::Namespace 1.42 XML::XPath::Node::PI 1.42 XML::XPath::Node::Text 1.42 XML::XPath::NodeSet 1.42 XML::XPath::Number 1.42 XML::XPath::Parser 1.42 XML::XPath::PerlSAX 1.42 XML::XPath::Root 1.42 XML::XPath::Step 1.42 XML::XPath::Variable 1.42 
XML::XPath::XMLParser 1.42 XML::XPathEngine 0.14 XML::XPathEngine::Boolean unknown XML::XPathEngine::Expr unknown XML::XPathEngine::Function unknown XML::XPathEngine::Literal unknown XML::XPathEngine::LocationPath unknown XML::XPathEngine::NodeSet unknown XML::XPathEngine::Number unknown XML::XPathEngine::Root unknown XML::XPathEngine::Step 1.0 XML::XPathEngine::Variable unknown XS unknown YAML 1.26 YAML::Any 1.26 YAML::Dumper unknown YAML::Dumper::Base unknown YAML::Error unknown YAML::LibYAML 0.72 YAML::Loader unknown YAML::Loader::Base unknown YAML::Marshall unknown YAML::Mo unknown YAML::Node unknown YAML::Tag unknown YAML::Tiny 1.73 YAML::Types unknown YAML::XS 0.72 YAML::XS::LibYAML unknown Include path (INC) directories Searched Number of Modules /sw/comp/perl_modules/5.26.2/rackham/lib/perl5/5.26.2/x86_64-linux-thread-multi yes 0 /sw/comp/perl_modules/5.26.2/rackham/lib/perl5/5.26.2 yes 0 /sw/comp/perl_modules/5.26.2/rackham/lib/perl5/x86_64-linux-thread-multi yes 1055 /sw/comp/perl_modules/5.26.2/rackham/lib/perl5 yes 5989 /sw/comp/perl/5.26.2/rackham/lib/5.26.2/x86_64-linux-thread-multi no unknown /sw/comp/perl/5.26.2/rackham/lib/5.26.2 no unknown /sw/comp/perl/5.26.2/rackham/lib no unknown /sw/comp/perl/5.26.2/rackham/lib/site_perl/5.26.2/x86_64-linux-thread-multi no unknown /sw/comp/perl/5.26.2/rackham/lib/site_perl/5.26.2 no unknown
  • Total modules : 5989
"},{"location":"software/perl_packages/","title":"How do I install local Perl packages?","text":""},{"location":"software/perl_packages/#what-is-available-already-in-the-perl-modules","title":"What is available already in the perl modules?","text":"
  • A number of packages are available by default with all Perl versions.

  • For Perl version 5.18.4 in particular (available through the software module system as perl/5.18.4), we have installed many more Perl packages. These are available by loading the software module perl_modules/5.18.4. We have a complete list of the Perl packages available.

  • If you would like to use BioPerl, module avail BioPerl after loading bioinfo-tools will show the versions available. The latest is BioPerl/1.6.924_Perl5.18.4, which is built against Perl 5.18.4 so also loads the modules perl/5.18.4 and perl_modules/5.18.4.

"},{"location":"software/perl_packages/#install-other-packages","title":"Install other packages","text":"

You could email support at support@uppmax.uu.se and suggest we include the package in perl_modules. If that doesn't work, or you decide to install it for yourself, please keep reading.

First, you have to decide where you want to put your local Perl packages. Save this path in a temporary environment variable called MY_PERL; make sure to substitute the path with your own:

export MY_PERL=/home/johanhe/slask/perl/\n

Then we download and install a more lightweight CPAN client called cpanm, which has less confusing settings to configure and also makes it easier to install local packages. We'll also install the module local::lib to a directory of your choice:

wget -O- http://cpanmin.us | perl - -l $MY_PERL App::cpanminus local::lib\n

Now we should be ready to set up the correct environment variables and load them for this session:

echo \"eval `perl -I $MY_PERL/lib/perl5 -Mlocal::lib=$MY_PERL`\" >> ~/.bash_profile \necho \"export PATH=$MY_PERL/bin/:$PATH\" >> ~/.bash_profile \nsource ~/.bash_profile\n

After this is done we can always install local packages easily by using the command:

cpanm [name-of-package-to-install]\n
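
For example, to install the CPAN package Text::CSV (the package name is only an illustration; any CPAN package name works):

cpanm Text::CSV\n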

"},{"location":"software/picard/","title":"Picard","text":"

'Picard is a set of command line tools for manipulating high-throughput sequencing (HTS) data and formats such as SAM/BAM/CRAM and VCF' (source: the Picard documentation).

"},{"location":"software/picard/#usage","title":"Usage","text":"

Load the bioinfo-tools module first:

module load bioinfo-tools\n

Then search for your favorite Picard version:

module spider picard\n
What does this look like?

Your output will be similar to this:

[sven@rackham2 ~]$ module spider picard\n\n----------------------------------------------------------------------------\n  picard:\n----------------------------------------------------------------------------\n     Versions:\n        picard/1.92\n        picard/1.118\n        picard/1.141\n        picard/2.0.1\n        picard/2.10.3\n        picard/2.19.2\n        picard/2.20.4\n        picard/2.23.4\n        picard/2.27.5\n        picard/3.1.1\n\n----------------------------------------------------------------------------\n  For detailed information about a specific \"picard\" package (including how to l\noad the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modu\nles.\n  For example:\n\n     $ module spider picard/3.1.1\n----------------------------------------------------------------------------\n

Then load your favorite version:

module load picard/3.1.1\n
What does this look like?

Your output will be similar to this:

[sven@rackham2 ~]$ module load picard/3.1.1\npicard/3.1.1: java -jar $PICARD command ...\n

Read up on how to use Picard:

module help picard/3.1.1\n
What does this look like?

Your output will be similar to this:

[sven@rackham2 ~]$ module help picard/3.1.1\n\n----------------------------------------------------------------------- Module Specific Help for \"picard/3.1.1\" -----------------------------------------------------------------------\n picard - use picard/3.1.1\n\n    Version 3.1.1\n\nUsage:\n\n    java -jar $PICARD command ...\n\nor\n\n    java -jar $PICARD_ROOT/picard.jar command ...\n\nwhere 'command' is the desired Picard command, and ... are the desired further arguments.\n

Here is an example of using Picard to test if a file is a valid BAM/CRAM/SAM file:

java -jar $PICARD ValidateSamFile --INPUT my_file.bam\n
What does this look like?

First, download an example BAM file from the Picard GitHub repository:

[sven@rackham2 ~]$ wget https://github.com/broadinstitute/picard/raw/master/testdata/picard/flow/reads/input/sample_mc.bam\n\n--2024-08-05 09:16:40--  https://github.com/broadinstitute/picard/raw/master/testdata/picard/flow/reads/input/sample_mc.bam\nResolving github.com (github.com)... 140.82.121.3\nConnecting to github.com (github.com)|140.82.121.3|:443... connected.\nHTTP request sent, awaiting response... 302 Found\nLocation: https://raw.githubusercontent.com/broadinstitute/picard/master/testdata/picard/flow/reads/input/sample_mc.bam [following]\n--2024-08-05 09:16:40--  https://raw.githubusercontent.com/broadinstitute/picard/master/testdata/picard/flow/reads/input/sample_mc.bam\nResolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.108.133, 185.199.110.133, ...\nConnecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 117715 (115K) [application/octet-stream]\nSaving to: \u2018sample_mc.bam\u2019\n\n100%[=============================================================================================================================================>] 117,715     --.-K/s   in 0.001s  \n\n2024-08-05 09:16:41 (171 MB/s) - \u2018sample_mc.bam\u2019 saved [117715/117715]\n

Your output will be similar to this, when using that valid BAM file:

[sven@rackham2 ~]$ java -jar $PICARD ValidateSamFile --INPUT sample_mc.bam \nAug 05, 2024 9:16:47 AM com.intel.gkl.NativeLibraryLoader load\nINFO: Loading libgkl_compression.so from jar:file:/sw/bioinfo/picard/3.1.1/rackham/picard.jar!/com/intel/gkl/native/libgkl_compression.so\n[Mon Aug 05 09:16:47 CEST 2024] ValidateSamFile --INPUT sample_mc.bam --MODE VERBOSE --MAX_OUTPUT 100 --IGNORE_WARNINGS false --VALIDATE_INDEX true --INDEX_VALIDATION_STRINGENCY EXHAUSTIVE --IS_BISULFITE_SEQUENCED false --MAX_OPEN_TEMP_FILES 8000 --SKIP_MATE_VALIDATION false --VERBOSITY INFO --QUIET false --VALIDATION_STRINGENCY STRICT --COMPRESSION_LEVEL 5 --MAX_RECORDS_IN_RAM 500000 --CREATE_INDEX false --CREATE_MD5_FILE false --help false --version false --showHidden false --USE_JDK_DEFLATER false --USE_JDK_INFLATER false\n[Mon Aug 05 09:16:47 CEST 2024] Executing as sven@rackham2.uppmax.uu.se on Linux 3.10.0-1160.119.1.el7.x86_64 amd64; OpenJDK 64-Bit Server VM 17+35-2724; Deflater: Intel; Inflater: Intel; Provider GCS is available; Picard version: Version:3.1.1\nWARNING 2024-08-05 09:16:47 ValidateSamFile NM validation cannot be performed without the reference. All other validations will still occur.\nNo errors found\n[Mon Aug 05 09:16:48 CEST 2024] picard.sam.ValidateSamFile done. Elapsed time: 0.01 minutes.\nRuntime.totalMemory()=2181038080\n[sven@rackham2 ~]$ \n

Your output will be similar to this, when using an invalid file, such as an R script file:

[sven@rackham2 ~]$ java -jar $PICARD ValidateSamFile --INPUT app.R \nAug 05, 2024 9:13:20 AM com.intel.gkl.NativeLibraryLoader load\nINFO: Loading libgkl_compression.so from jar:file:/sw/bioinfo/picard/3.1.1/rackham/picard.jar!/com/intel/gkl/native/libgkl_compression.so\n[Mon Aug 05 09:13:20 CEST 2024] ValidateSamFile --INPUT app.R --MODE VERBOSE --MAX_OUTPUT 100 --IGNORE_WARNINGS false --VALIDATE_INDEX true --INDEX_VALIDATION_STRINGENCY EXHAUSTIVE --IS_BISULFITE_SEQUENCED false --MAX_OPEN_TEMP_FILES 8000 --SKIP_MATE_VALIDATION false --VERBOSITY INFO --QUIET false --VALIDATION_STRINGENCY STRICT --COMPRESSION_LEVEL 5 --MAX_RECORDS_IN_RAM 500000 --CREATE_INDEX false --CREATE_MD5_FILE false --help false --version false --showHidden false --USE_JDK_DEFLATER false --USE_JDK_INFLATER false\n[Mon Aug 05 09:13:21 CEST 2024] Executing as sven@rackham2.uppmax.uu.se on Linux 3.10.0-1160.119.1.el7.x86_64 amd64; OpenJDK 64-Bit Server VM 17+35-2724; Deflater: Intel; Inflater: Intel; Provider GCS is available; Picard version: Version:3.1.1\nWARNING 2024-08-05 09:13:21 ValidateSamFile NM validation cannot be performed without the reference. All other validations will still occur.\nERROR::MISSING_READ_GROUP:Read groups is empty\nSAMFormatException on record 01\nERROR 2024-08-05 09:13:21 ValidateSamFile SAMFormatException on record 01\n[Mon Aug 05 09:13:21 CEST 2024] picard.sam.ValidateSamFile done. Elapsed time: 0.01 minutes.\nRuntime.totalMemory()=2181038080\nTo get help, see http://broadinstitute.github.io/picard/index.html#GettingHelp\n
"},{"location":"software/profilers/","title":"Profilers","text":"

There are some profiling tools that are available at UPPMAX.

Software Compiler(s) Description Intel VTune Intel Broad set of tools with a focus on performance improvement Intel Advisor Intel Broad set of tools with a focus on performance analysis gprof GCC run-time profiler valgrind GCC and Intel Broad set of tools"},{"location":"software/projplot/","title":"projplot","text":"

projplot is an UPPMAX tool to plot your core hour usage.

"},{"location":"software/projplot/#minimal-use","title":"Minimal use","text":"

projplot needs only the project code:

projplot -A [project_code]\n

For example:

projplot -A uppmax2020-2-2\n

Output will look similar to this:

Example projplot output. The horizontal axis shows the days before today, the vertical axis shows the cores used on that day (hence, the number of core hours is the area under the curve). For this example project, the maximum number of cores used on a single day is 800.

This graph shows you the project's core usage during the last 30 days. The heights of the peaks in the plot show how many cores were used simultaneously, and the widths show for how long they were used.

If we look at the big peak to the left in the diagram, we can see that 15 cores were used for around 24 hours, and somewhere in the middle of that period, another 8 cores were used for a shorter period of time.

Since the plots are made using ordinary text, there will sometimes be rounding errors because of the low resolution of the terminal window, which is usually around 80x30 characters. The plot will adapt to your terminal window, so increase the size of your window to increase the resolution of the plot (the data being plotted has a resolution down to single seconds).

As time progresses, the peaks in the graph will move to the left in the diagram. In the standard plot of the last 30 days, that means that when a peak exits the plot to the left, you get those core hours back to the project.

"},{"location":"software/projplot/#if-you-are-over-quota","title":"If you are over quota","text":"

If we look at a project that has used more core hours than the project's allocation, the image will look like this:

There is a message about the core hour limit being reached at the top of the plot. If you look in the diagram at around 10 days ago, you will see the point where the core hour limit is reached (the bar of >s). This point is calculated by summing up all core hour usage to the right of the bar. What this means in reality is that if this project were to stop analyzing right now, they would have to wait until the bar of >s has exited the graph to the left (i.e. ~20 days) before they are below their core hour limit again. Most of the time, projects do not completely stop analyzing, so for each core hour they use, the bar of >s will move further to the right.

"},{"location":"software/projplot/#other-options","title":"Other options","text":"

projplot has more options, which are shown by using --help:

projplot --help\n

Below, these options are discussed in detail.

"},{"location":"software/projplot/#help","title":"Help","text":"

Use --help (or -h) to get a short description of the options and some examples:

projplot --help\n
What does that look like?
Usage: projplot -A <proj-id> [options]\n\nMore details: https://uppmax.uu.se/support/user-guides/plotting-your-core-hour-usage\n\nExample runs:\n\n# Plot the last 30 days of project <proj>\nprojplot -A <proj>\n\n# Plot the last 30 days of project <proj> on cluster <cluster>\nprojplot -A <proj> -c <cluster>\n\n# Plot the last <n> days of project <proj>\nprojplot -A <proj> -d <n>\n\n# Plot the usage for project <proj> since <date>\nprojplot -A <proj> -s <date>\n\n# Plot the usage for project <proj> between <date_1> and <date_2>\nprojplot -A <proj> -s <date_1> -e <date_2>\n\n# Plot the usage for project <proj> between <date_1> and <date_2>, on cluster <cluster>\nprojplot -A <proj> -s <date_1> -e <date_2> -c <cluster>\n\n# Plot the usage for project <proj> between date <date_1> and <days> days later\nprojplot -A <proj> -s <date_1> -d <days>\n\n# Plot the usage for project <proj> between date <date_1> and <days> days earlier\nprojplot -A <proj> -e <date_1> -d <days>\n\n# Plot the last 30 days of project <proj>, but don't check the queue for running jobs\nprojplot -A <proj> -R\n\n\nOptions:\n  -h, --help            show this help message and exit\n  -A ACCOUNT, --account=ACCOUNT\n                        Your UPPMAX project ID\n  -c CLUSTER, --cluster=CLUSTER\n                        The cluster you want to plot (default: current\n                        cluster)\n  -d DAYS, --days=DAYS  The number of days you want to plot (default: none)\n  -s START, --start=START\n                        The starting date you want to plot (format: YYYY-MM-\n                        DD)\n  -e END, --end=END     The ending date you want to plot (format: YYYY-MM-DD)\n  -R, --no-running-jobs\n                        Use to skip including running jobs in the plot\n                        (faster). Useful if you are not running any jobs and\n                        want to save time.\n
"},{"location":"software/projplot/#number-of-days","title":"Number of days","text":"

Use --days (or -d) to plot a custom number of days, instead of the default of 30 days:

projplot -A [project_code] --days [number_of_days]\n

For example, this will plot the last 45 days:

projplot -A uppmax2020-2-2 --days 45\n
"},{"location":"software/projplot/#starting-date","title":"Starting date","text":"

Use --start (or -s) to specify a custom starting date, from when the time in your plot will start:

projplot -A [project_code] --start [starting_date_in_yyyy-mm-dd_format]\n

For example:

projplot -A uppmax2020-2-2 --start 2023-05-03\n

will give you a plot starting on the date 2023-05-03 and the default number of days after that date. The command below does exactly the same, yet makes the default number of days explicit:

projplot -A uppmax2020-2-2 --start 2023-05-03 --days 30\n
"},{"location":"software/projplot/#ending-data","title":"Ending data","text":"

Use --end (or -e) to specify a custom ending date, at which the time in your plot will end:

projplot -A [project_code] --end [ending_date_in_yyyy-mm-dd_format]\n

For example:

projplot -A uppmax2020-2-2 --end 2023-05-03\n

will give you a plot ending on the date 2023-05-03 and the default number of days before that date. The command below does exactly the same, yet makes the default number of days explicit:

projplot -A uppmax2020-2-2 --end 2023-05-03 --days 30\n
"},{"location":"software/projplot/#start-and-end-date-combined","title":"Start and end date combined","text":"

Use --start and --end combined to specify a custom range of dates for your plot:

projplot -A [project_code] --start [starting_date_in_yyyy-mm-dd_format] --end [ending_date_in_yyyy-mm-dd_format]\n

For example:

projplot -A uppmax2020-2-2 --start 2022-05-03 --end 2023-05-03\n
"},{"location":"software/projplot/#cluster","title":"Cluster","text":"

Use --cluster (or -c) to determine which UPPMAX cluster to plot. By default, the current cluster is used.

Since the different clusters at UPPMAX have separate core hour quotas, it makes sense to be able to plot them separately.

projplot -A [project_code] -c [cluster_name]\n

For example:

projplot -A uppmax2020-2-2 -c snowy\n

Valid cluster names are bianca, rackham and snowy.

How to get valid cluster names?

Use projplot with a nonsense cluster name:

projplot -A uppmax2020-2-2 --cluster nonsensename\n

The error message will display valid cluster names.

This option can be combined with all the other options.

"},{"location":"software/projplot/#exclude-running-jobs","title":"Exclude running jobs","text":"

Use --no-running-jobs (or -R) to skip checking the queue for running jobs.

If you don't have any running jobs, asking the queue system to list jobs is just a waste of time (anywhere from 1 to 15 seconds). By giving --no-running-jobs when running projplot, it skips checking the queue; if you do have jobs running, they will not be visible in the plot or in the sum of core hours used.

projplot -A [project_code] --no-running-jobs\n

For example:

projplot -A uppmax2020-2-2 --no-running-jobs\n
"},{"location":"software/python/","title":"Python user guide","text":"

Welcome to the UPPMAX Python user guide.

We describe what Python is and that there are multiple Python versions.

Then, we show how to load Python and to load Python packages after which you can run Python.

Finally, you can find UPPMAX Python-related courses and these more advanced topics:

  • Programming in Python
  • Installing Python packages
  • Virtual environments in Python
  • How to run parallel jobs in Python
"},{"location":"software/python/#what-is-python","title":"What is Python?","text":"

Python is a high-level, general-purpose programming language. Its design philosophy emphasizes code readability with the use of significant indentation [Kuhlman, 2009].

"},{"location":"software/python/#python-versions","title":"Python versions","text":"

Python (or to be precise: the Python interpreter) has different versions. The current major version of Python is Python 3. Python 3 is not backwards compatible with Python 2. This means that you need to use the correct Python version to run a Python script.

Could you give me an example of a difference between Python 2 and 3?

One example is how Python 2 and Python 3 divide two integers. Here is an example that will work on all UPPMAX clusters.

Load Python 2.7.15:

module load python/2.7.15\n

Then

python -c \"print(1/2)\"\n

will print 0, as this is an integer division: two fits zero times in one.

Load Python 3.11.4:

module load python/3.11.4\n

Then

python -c \"print(1/2)\"\n

will print 0.5, as this is turned into a floating point division, equivalent to 1.0 / 2.0.

Which version of Python is python?

To determine which version python is, in a terminal, type:

python --version\n

to see which Python version you are using now.

Which version of Python is python3?

To determine which version python3 is, in a terminal, type:

python3 --version\n

to see which Python version you are using now.

"},{"location":"software/python/#loading-python","title":"Loading Python","text":"Prefer seeing a video?

A video that shows how to load the Python module can be found here.

The different versions of Python are available via the module system on all UPPMAX clusters. Loading a Python module also makes some Python packages available.

Forgot what the module system is?

See the UPPMAX pages on the module system here.

UPPMAX modules or Python modules?

On this page, we will use the word 'modules' for UPPMAX modules and 'packages' for Python modules, to be clear about what is meant. The word 'package' is used in multiple other languages, such as R, with a similar definition as a Python module.

To find out which Python modules there are, use module spider python.

What is the output of that command?

The output of module spider python on the day of writing, is:

[user@rackham1 ~]$ module spider python\n\n---------------------------------------------------------------------------------------\n  python:\n---------------------------------------------------------------------------------------\n     Versions:\n        python/2.7.6\n        python/2.7.9\n        python/2.7.11\n        python/2.7.15\n        python/3.3\n        python/3.3.1\n        python/3.4.3\n        python/3.5.0\n        python/3.6.0\n        python/3.6.8\n        python/3.7.2\n        python/3.8.7\n        python/3.9.5\n        python/3.10.8\n        python/3.11.4\n     Other possible modules matches:\n        Biopython  Boost.Python  GitPython  IPython  Python  biopython  flatbuffers-python\n ...\n\n---------------------------------------------------------------------------------------\n  To find other possible module matches execute:\n\n      $ module -r spider '.*python.*'\n\n---------------------------------------------------------------------------------------\n  For detailed information about a specific \"python\" package (including how to load the mod\nules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider python/3.11.4\n---------------------------------------------------------------------------------------\n

To load a specific version of Python into your environment, type module load python/[version], where [version] is a Python version, for example, module load python/3.11.4
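
For example:

module load python/3.11.4\n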

Do I really need to load a Python module?

It is recommended to load a Python module, but in some cases you will not get into trouble.

When you do not load a module, the system-installed Python versions are used. These are python version 2.7.5 and python3 version 3.6.8.

If using those older versions gives you no trouble, all is well, for example, when running basic Python scripts that have no package imports.

However, when any problem occurs, load a newer module.

Why are there both python/3.X.Y and python3/3.X.Y modules?

Sometimes existing software might use python2 and there\u2019s nothing you can do about that.

In pipelines and other toolchains the different tools may together require both python2 and python3.

How to deal with tools that require both python2 and python3?

You can run two python modules at the same time if one of the modules is python/2.X.Y and the other module is python3/3.X.Y (i.e. not python/3.X.Y).
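
A minimal sketch of this, with illustrative version numbers (use module spider python and module spider python3 to see which versions exist):

module load python/2.7.15\nmodule load python3/3.11.8\n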

"},{"location":"software/python/#loading-python-package-modules","title":"Loading Python package modules","text":"

Terminology

A Python package consists of one or more Python modules. In this document, we avoid the term 'Python module', to avoid confusion with the UPPMAX modules.

For more complex Python packages, there exist UPPMAX modules to load these:

  • python_GIS_packages: for geographic information system packages
  • python_ML_packages: for machine learning Python packages
How could I find these modules myself?

Use:

module spider packages\n
"},{"location":"software/python/#loading-python-packages","title":"Loading Python packages","text":"

Terminology

A Python package consists of one or more Python modules. In this document, we avoid the term 'Python module', to avoid confusion with the UPPMAX modules.

Many scientific tools are distributed as Python packages, which allows any user to run complex tools from a terminal or script. For example, the following Python code imports the functionality of the pandas library:

import pandas\n

Some packages/tools are preinstalled on all UPPMAX clusters. To load such a package:

  • determine if it comes with your Python version
  • determine if it comes as a module
"},{"location":"software/python/#determine-if-a-python-package-comes-with-your-python-module","title":"Determine if a Python package comes with your Python module","text":"

To determine if a Python package comes with your Python module, there are multiple ways:

  • Using pip list
  • Using the module help
  • Importing the package
"},{"location":"software/python/#using-pip-list","title":"Using pip list","text":"

To determine if a Python package comes with your Python module, pip list is one of the ways to do so.

On a terminal, type:

pip list\n

This shows a list of Python packages that are installed.

What does the output of pip list look like?

Here is an example:

Package                   Version\n------------------------- ---------------\nanndata                   0.10.5.post1\nanyio                     4.2.0\nappdirs                   1.4.4\nargon2-cffi               23.1.0\nargon2-cffi-bindings      21.2.0\n[more Python packages]\nWerkzeug                  3.0.1\nwheel                     0.42.0\nwidgetsnbextension        4.0.9\nzipp                      3.17.0\nzope.interface            6.1\n
"},{"location":"software/python/#using-the-module-help","title":"Using the module help","text":"

To determine if a Python package comes with your Python module using the module help, in a terminal, type:

module help python/[module_version]\n

where [module_version] is a version of a Python module, for example:

module help python/3.11.4\n
What is the output of module help python/3.11.4?

Here is part of the output of module help python/3.11.4:

------------------------ Module Specific Help for \"python/3.11.4\" -------------------------\n    Python - use Python\n\n    Version 3.11.4\n\n\nThis module provides the executable names 'python' and 'python3'.\n\nSeveral additional python packages are also installed in this module. The complete list of\npackages in this module, produced using 'pip list', is:\n\nPackage                   Version\n------------------------- -----------\nanndata                   0.9.2\nanyio                     3.7.1\nargon2-cffi               21.3.0\n...\nwidgetsnbextension        4.0.8\nzipp                      3.16.2\nzope.interface            6.0\n
"},{"location":"software/python/#importing-the-package","title":"Importing the package","text":"

Importing a Python package is another way to determine if it comes installed with your Python module. From the terminal, do:

python -c \"import [your_package]\"\n
What does that -c do?

python -c will run the text after it as Python code. In this way, you can directly run code, i.e. you do not need to create a file to run.

where [your_package] is the name of a Python package, for example:

python -c \"import pandas\"\n
What is the output if the Python package is found?

If the Python package is found, there is no output.

What is the output if the Python package is not found?

Here an absent package is loaded, with the nonsense name absentpackage:

python -c \"import absentpackage\"\n

This results in the following error:

Traceback (most recent call last):\n  File \"<string>\", line 1, in <module>\nModuleNotFoundError: No module named 'absentpackage'\n
"},{"location":"software/python/#determine-if-a-python-package-comes-with-a-module","title":"Determine if a Python package comes with a module","text":"

If the Python package is not pre-installed with your version of Python, use the UPPMAX module system to search for it.

Not all packages are easy to find, as some are part of super-packages, for example the TensorFlow Python libraries, which are part of the python_ML_packages/[version]-{cpu,gpu}, for example python_ML_packages/3.11.8-cpu.
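
For example, to make TensorFlow available this way (assuming the module version mentioned above):

module load python_ML_packages/3.11.8-cpu\npython -c \"import tensorflow\"\n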

Want to see a list of Python packages in python_ML_packages/3.11.8-cpu that are not in python/3.11.8?

Here you go:

  • absl-py
  • array-record
  • astunparse
  • cachetools
  • cons
  • dill
  • dm-tree
  • ducc0
  • etils
  • etuples
  • flatbuffers
  • gast
  • google-auth
  • google-auth-oauthlib
  • google-pasta
  • googleapis-common-protos
  • grpcio
  • imbalanced-learn
  • importlib_resources
  • keras
  • libclang
  • llvmlite
  • logical-unification
  • miniKanren
  • ml-dtypes
  • multipledispatch
  • nlp
  • numba
  • oauthlib
  • opt-einsum
  • patsy
  • promise
  • protobuf
  • pyasn1
  • pyasn1-modules
  • pytensor
  • requests-oauthlib
  • rsa
  • scikit-learn
  • seaborn
  • statsmodels
  • tensorboard
  • tensorboard-data-server
  • tensorflow-cpu
  • tensorflow-datasets
  • tensorflow-estimator
  • tensorflow-io-gcs-filesyst
  • tensorflow-metadata
  • tensorflow-probability
  • termcolor
  • threadpoolctl
  • toml
  • torch
  • torchaudio
  • torchvision
  • wrapt
  • xxhash

It may not always be easy to find your Python package within the many modules. Do not hesitate to contact support so that you can spend time on your research and we figure this out :-)

"},{"location":"software/python/#stand-alone-tools","title":"Stand-alone tools","text":"

Some Python packages work as stand-alone tools, for instance in bioinformatics. The tool may already be installed as a module. Check if it is there by using the module system spider function:

module spider [tool_name]\n

where [tool_name] is (part of) the name of the tool. module spider is case-insensitive, hence YourTool and yourtool give similar results.
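
For example, to search for the Picard tool described earlier:

module spider picard\n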

What are UPPMAX modules?

See the page about the UPPMAX module system here

"},{"location":"software/python/#running-python","title":"Running Python","text":"

You can run Python in multiple ways:

  • use Python to run a Python script
  • use Python in an interactive session

There are more ways to program in Python; these are discussed at the UPPMAX page on Python programming here

"},{"location":"software/python/#use-python-to-run-a-python-script","title":"Use Python to run a Python script","text":"

You can run a Python script in the shell by:

python example_script.py\n

or, if you loaded a python3 module:

python3 example_script.py\n
"},{"location":"software/python/#use-python-in-an-interactive-session","title":"Use Python in an interactive session","text":"

You start a python session by typing:

python\n

or

python3\n

The python prompt looks like this:

>>>\n

Exit with <Ctrl-D>, quit() or exit().

"},{"location":"software/python/#programming-in-python","title":"Programming in Python","text":"

There are more ways to program in Python; these are discussed at the UPPMAX page on Python programming here

"},{"location":"software/python/#uppmax-python-related-courses","title":"UPPMAX Python-related courses","text":"

See the UPPMAX courses and workshops to find UPPMAX courses related to Python.

"},{"location":"software/python/#installing-python-packages","title":"Installing Python packages","text":"

How to install Python packages is described here.

"},{"location":"software/python/#virtual-environments-in-python","title":"Virtual environments in Python","text":"

How to use virtual environments in Python is described here.

"},{"location":"software/python/#how-to-run-parallel-jobs-in-python","title":"How to run parallel jobs in Python","text":"

How to run parallel jobs in Python is described here.

"},{"location":"software/python/#references","title":"References","text":"
  • [Kuhlman, 2009] Kuhlman, Dave. A python book: Beginning python, advanced python, and python exercises. Lutz: Dave Kuhlman, 2009.
"},{"location":"software/python/#links","title":"Links","text":"
  • Official Python documentation
  • Python forum
  • Free online book: 'How to Think Like a Computer Scientist'
  • UPPMAX TensorFlow guide
  • UPPMAX PyTorch guide
"},{"location":"software/python_install_packages/","title":"Installing Python packages","text":"

This page describes how to install Python packages.

There are many ways to install a Python package:

  • Using setup.py
  • Using a Python package installer
    • PyPI using pip
    • Conda using conda

You may want to check if a package is already installed first :-).

The Python package installers are compared, after which each is discussed:

  • PyPI using pip
  • Conda using conda
","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#check-if-a-package-is-already-installed","title":"Check if a package is already installed","text":"

There are multiple ways to check if a Python package is installed:

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#1-pip-list","title":"1. pip list","text":"

In the terminal, type:

pip list\n

You'll see a list of all installed packages.

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#2-import","title":"2. import","text":"

Start Python. Then, within the Python interpreter, type:

import [package]\n

where [package] is the name of the Python package, for example import mhcnuggets.

Does it work? Then it is there!

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#comparison-between-conda-and-pypi","title":"Comparison between Conda and PyPI","text":"
  • PyPI (pip) is traditionally for Python-only packages but it is no problem to also distribute packages written in other languages as long as they provide a Python interface.

  • Conda (conda) is more general and while it contains many Python packages and packages with a Python interface, it is often used to also distribute packages which do not contain any Python (e.g. C or C++ packages).

Parameter conda pip Installs Python packages Yes Yes Installs non-Python software Yes No

Many libraries and tools are distributed in both ecosystems.

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#pip","title":"pip","text":"

pip is a popular Python package installer.

To install a Python package using pip, in a terminal or Python shell, do:

pip install --user [package name]\n

where [package name] is the name of a Python package, for example pip install --user mhcnuggets.

Can I also use pip3?

Yes, you can. The command then becomes:

pip3 install --user [package name]\n

For example pip3 install --user mhcnuggets.

Most that applies to pip applies to pip3.

Due to using --user, the package ends up in a subfolder of the user's home folder, which is ~/.local/lib/python[version]/site-packages/, where version is the Python version with only the major and minor version, so for Python version 3.11.8, the folder will be python3.11 (i.e. the patch number, 8 is not included).
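
To check where pip placed a package, you can use pip show, here with the mhcnuggets example from above:

pip show mhcnuggets\n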

If you would like to have your packages installed in another folder, do:

pip install --prefix=[root_folder] [package name]\n

where [root_folder] is the root folder of the package installation, for example --prefix=~/.local. With this root folder, this option is the same as using --user, as described above.

When using a custom root folder, Python cannot find it without help. Setting the environment variable PYTHONPATH to the correct folder allows Python to find packages in a custom folder.

export PYTHONPATH=[root_folder]/lib/python[version]/site-packages/:$PYTHONPATH\n

for example, when [root_folder] is ~/my_python_packages and for using Python 3.11.8, this will be:

export PYTHONPATH=~/my_python_packages/lib/python3.11/site-packages/:$PYTHONPATH\n

Consider adding this line to your .bashrc file, so it is loaded every time you log in.

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#conda","title":"conda","text":"

See our Conda user Guide

","tags":["pip","pip install","Python","package"]},{"location":"software/python_install_packages/#using-setuppy","title":"Using setup.py","text":"

Some Python packages are only available as downloads and need to be installed using a Python script, commonly called setup.py.

If that is the case for the package you need, this is how you do it:

  • Pick a location for your installation (change below to fit - I am installing under a project storage)

    • mkdir /proj/<project>/<mystorage>/mypythonpackages
    • cd /proj/<project>/<mystorage>/mypythonpackages
  • Load Python + (on Kebnekaise) site-installed prerequisites (SciPy-bundle, matplotlib, etc.)

  • Install any remaining prerequisites. Remember to activate your Virtualenv if installing with pip!
  • Download Python package, place it in your chosen installation dir, then untar/unzip it
  • cd into the source directory of the Python package

    • Run python setup.py build
    • Then install with: python setup.py install --prefix=<path to install dir>
  • Add the path to $HOME/.bash_profile (note that it will differ by Python version):

    • export PYTHONPATH=$PYTHONPATH:<path to your install directory>/lib/python3.11/site-packages

You can use it as normal inside Python (remember to load dependent modules, as well as activate your virtual environment if it depends on some packages you installed with pip): import <python-module>
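
As a consolidated sketch of the steps above, assuming a hypothetical package archive mypackage-1.0.tar.gz:

mkdir /proj/<project>/<mystorage>/mypythonpackages\ncd /proj/<project>/<mystorage>/mypythonpackages\ntar xzf mypackage-1.0.tar.gz\ncd mypackage-1.0\npython setup.py build\npython setup.py install --prefix=/proj/<project>/<mystorage>/mypythonpackages\nexport PYTHONPATH=$PYTHONPATH:/proj/<project>/<mystorage>/mypythonpackages/lib/python3.11/site-packages\n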

","tags":["pip","pip install","Python","package"]},{"location":"software/python_parallel_jobs/","title":"How to run parallel jobs in Python","text":"

This page describes how to run parallel jobs in Python. For the general pages on Python, go here.

Material here is taken partly from the parallel part of the online course Python for Scientific Computing

Parallel computing is when many different tasks are carried out simultaneously. There are three main models:

  • Embarrassingly parallel: the code does not need to synchronize/communicate with other instances, and you can run multiple instances of the code separately, and combine the results later. If you can do this, great! (array jobs, task queues)

  • Shared memory parallelism: Parallel threads need to communicate and do so via the same memory (variables, state, etc). (OpenMP)

  • Message passing: Different processes manage their own memory segments. They share data by communicating (passing messages) as needed. (Message Passing Interface (MPI)).

There are several packages available for Python that let you run parallel jobs. Some of them are only able to run on one node, while others try to leverage several machines.

","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#threading","title":"Threading","text":"

Threading divides up your work among a number of cores within a node. The threads share memory.

  • Multi-threading documentation
  • Examples

The designers of the Python language made the choice that only one thread in a process can run actual Python code by using the so-called global interpreter lock (GIL). This means that approaches that work in other languages (C, C++, Fortran) may not work in Python unless you are a bit careful. At first glance, this is bad for parallelism. But it\u2019s not all bad:

External libraries (NumPy, SciPy, Pandas, etc), written in C or other languages, can release the lock and run multi-threaded. Also, most input/output releases the GIL, and input/output is slow.

If speed is important enough that you need things to run in parallel, you usually wouldn\u2019t use pure Python.

More on the global interpreter lock

The threading Python module is very low level, and you shouldn\u2019t use it unless you really know what you are doing.

We recommend you find a UNIX threading tutorial first before embarking on using the threading module.

","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#distributed-computing","title":"Distributed computing","text":"

As opposed to threading, Python has a reasonable way of doing something similar using multiple processes.

Distributed processing uses individual processes with individual memory, that communicate with each other. In this case, data movement and communication is explicit. Python supports various forms of distributed computing.

  • A native master-worker system based on remote procedure calls: multiprocessing.py
  • MPI through mpi4py : a Python wrapper for the MPI protocol, see further down

If choosing between multiprocessing and MPI, multiprocessing is easier to program, whereas MPI may be more suitable for multi-node applications.

","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#multiprocessingdistributed","title":"Multiprocessing/distributed","text":"

The interface is a lot like threading, but in the background creates new processes to get around the global interpreter lock.

There are low-level functions which have a lot of the same risks and difficulties as when using threading.

To show an example, the split-apply-combine or map-reduce paradigm is quite useful for many scientific workflows. Consider you have this:

def square(x):\n    return x*x\n

You can apply the function to every element in a list using the map() function:

>>>list(map(square, [1, 2, 3, 4, 5, 6]))\n[1, 4, 9, 16, 25, 36]\n

The multiprocessing.pool.Pool class provides an equivalent but parallelized (via multiprocessing) way of doing this. The pool class, by default, creates one new process per CPU and does parallel calculations on the list:

>>>from multiprocessing import Pool\n>>>with Pool() as pool:\n...    pool.map(square, [1, 2, 3, 4, 5, 6])\n[1, 4, 9, 16, 25, 36]\n

As you can see, you can run distributed computing directly from the python shell.

Another example, distributed.py:

import random\n\ndef sample(n):\n    \"\"\"Make n trials of points in the square.\n    Return (n, number_in_circle)\n    This is our basic function.\n    By design, it returns everything it needs to compute\n    the final answer: both n (even though it is an input\n    argument) and n_inside_circle.\n    To compute our final answer, all we have to do is\n    sum up the n:s and the n_inside_circle:s and do our\n    computation\"\"\"\n    n_inside_circle = 0\n    for i in range(n):\n        x = random.random()\n        y = random.random()\n        if x**2 + y**2 < 1.0:\n            n_inside_circle += 1\n    return n, n_inside_circle\n\nimport multiprocessing.pool\npool = multiprocessing.pool.Pool()\n# The default pool makes one process per CPU\n#%%timeit\n# Do it once to time it\n#results = pool.map(sample, [10**5] * 10)     # \"* 10\" would mean 10 processes\n# Do it again to get the results, since the results of the above\n# cell aren't accessible because of the %%timeit magic.\nresults = pool.map(sample, [10**5] * 10)\npool.close()\nn_sum = sum(x[0] for x in results)\nn_inside_circle_sum = sum(x[1] for x in results)\npi = 4.0 * (n_inside_circle_sum / n_sum)\nprint(pi)\n
","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#batch-example","title":"Batch example","text":"

If you need to revive your knowledge about the scheduling system, please check Slurm user guide.

Batch script job_distributed.slurm:

#!/bin/bash\n#SBATCH -A j<proj>\n#SBATCH -p devel\n#SBATCH --job-name=distr_py      # create a short name for your job\n#SBATCH --nodes=1                # node count\n#SBATCH --ntasks=20              # total number of tasks across all nodes\n#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)\n#SBATCH --time=00:01:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n#SBATCH --mail-user=<email>\nmodule load python/3.9.5\npython distributed.py\n

Put the job in the queue:

sbatch job_distributed.slurm\n
","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#interactive-example","title":"Interactive example","text":"
salloc -A <proj> -p node -N 1 -n 10 -t 1:0:0\npython distributed.py\n
","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#mpi","title":"MPI","text":"

Presently you have to install your own mpi4py. You will need to activate paths to the MPI libraries. Therefore follow these steps.

  1. If you use python 3.10.8:
module load gcc/12.2.0 openmpi/4.1.4\n
 Otherwise:
module load gcc/9.3.0 openmpi/3.1.5\n
  2. pip install locally or in a virtual environment
pip install --user mpi4py\n

Remember that you will also have to load the openmpi module before running mpi4py code, so that the MPI header files can be found (e.g. with the command \"module load gcc/X.X.X openmpi/X.X.X\"). Because of how MPI works, we need to explicitly write our code into a file, pythonMPI.py:

import random\nimport time\nfrom mpi4py import MPI\ndef sample(n):\n    \"\"\"Make n trials of points in the square.\n    Return (n, number_in_circle)\n    This is our basic function.\n    By design, it returns everything it needs to compute\n    the final answer: both n (even though it is an input\n    argument) and n_inside_circle.\n    To compute our final answer, all we have to do is\n    sum up the n:s and the n_inside_circle:s and do our\n    computation\"\"\"\n    n_inside_circle = 0\n    for i in range(n):\n        x = random.random()\n        y = random.random()\n        if x ** 2 + y ** 2 < 1.0:\n            n_inside_circle += 1\n    return n, n_inside_circle\ncomm = MPI.COMM_WORLD\nsize = comm.Get_size()\nrank = comm.Get_rank()\nn = 10 ** 7\nif size > 1:\n    n_task = int(n / size)\nelse:\n    n_task = n\nt0 = time.perf_counter()\n_, n_inside_circle = sample(n_task)\nt = time.perf_counter() - t0\n\nprint(f\"before gather: rank {rank}, n_inside_circle: {n_inside_circle}\")\nn_inside_circle = comm.gather(n_inside_circle, root=0)\nprint(f\"after gather: rank {rank}, n_inside_circle: {n_inside_circle}\")\nif rank == 0:\n    pi_estimate = 4.0 * sum(n_inside_circle) / n\n    print(f\"\\nnumber of darts: {n}, estimate: {pi_estimate}, time spent: {t:.2} seconds\")\n

You can execute your code the normal way as

mpirun -n 3 python pythonMPI.py\n

A batch script, job_MPI.slurm, should include a \"module load gcc/9.3.0 openmpi/3.1.5\"

#!/bin/bash\n#SBATCH -A j<proj>\n#SBATCH -p devel\n#SBATCH --job-name=MPI_py        # create a short name for your job\n#SBATCH --nodes=1                # node count\n#SBATCH --ntasks=20              # total number of tasks across all nodes\n#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)\n#SBATCH --time=00:05:00          # total run time limit (HH:MM:SS)\n#SBATCH --mail-type=begin        # send email when job begins\n#SBATCH --mail-type=end          # send email when job ends\n#SBATCH --mail-user=<email>\nmodule load python/3.9.5\nmodule load gcc/9.3.0 openmpi/3.1.5\nmpirun -n 20 python pythonMPI.py\n
","tags":["Python","parallel"]},{"location":"software/python_parallel_jobs/#using-the-gpu-nodes","title":"Using the GPU nodes","text":"

Example with numba. First install numba locally:

pip install --user numba\n

Test script: add-list.py

import numpy as np\nfrom timeit import default_timer as timer\nfrom numba import vectorize\n# This should be a substantially high value.\nNUM_ELEMENTS = 100000000\n# This is the CPU version.\ndef vector_add_cpu(a, b):\n  c = np.zeros(NUM_ELEMENTS, dtype=np.float32)\n  for i in range(NUM_ELEMENTS):\n      c[i] = a[i] + b[i]\n  return c\n# This is the GPU version. Note the @vectorize decorator. This tells\n# numba to turn this into a GPU vectorized function.\n@vectorize([\"float32(float32, float32)\"], target='cuda')\ndef vector_add_gpu(a, b):\n  return a + b\ndef main():\n  a_source = np.ones(NUM_ELEMENTS, dtype=np.float32)\n  b_source = np.ones(NUM_ELEMENTS, dtype=np.float32)\n  # Time the CPU function\n  start = timer()\n  vector_add_cpu(a_source, b_source)\n  vector_add_cpu_time = timer() - start\n  # Time the GPU function\n  start = timer()\n  vector_add_gpu(a_source, b_source)\n  vector_add_gpu_time = timer() - start\n  # Report times\n  print(\"CPU function took %f seconds.\" % vector_add_cpu_time)\n  print(\"GPU function took %f seconds.\" % vector_add_gpu_time)\n  return 0\nif __name__ == \"__main__\":\n  main()\n

Run in an interactive session with GPUs on Snowy

[bjornc@rackham3 ~]$ interactive -A staff -n 1 -M snowy --gres=gpu:1  -t 1:00:01 --mail-type=BEGIN --mail-user=bjorn.claremar@uppmax.uu.se\nYou receive the high interactive priority.\nPlease, use no more than 8 GB of RAM.\nWaiting for job 6907137 to start...\nStarting job now -- you waited for 90 seconds.\n[bjornc@s160 ~]$ ml python/3.9.5\n[bjornc@s160 ~]$ python add-list.py  #run the script\nCPU function took 36.849201 seconds.\nGPU function took 1.574953 seconds.\n
","tags":["Python","parallel"]},{"location":"software/python_programming/","title":"Python programming","text":"

This page describes how to program in Python on the UPPMAX clusters.

There are multiple ways to program in Python:

Description Features Screenshot Use a text editor (see below) Non-interactive, no help Use the Python interpreter (see below) Interactive, terminal-based, some help Use IPython Interactive, terminal-based, more help and features Use Jupyter Interactive, web-based Use Visual Studio Code Interactive, install on local computer, use locally installed Python and Python packages"},{"location":"software/python_programming/#use-a-text-editor","title":"Use a text editor","text":"

Using a text editor to program in Python is a simple way to write code: it is the same as writing any text file.

Here we use the text editor GNU nano to write a Python script:

nano example_script.py\n

Within nano, write:

print('Hello, world!')\n
  • To save, press CTRL + O (i.e. the letter O), then Enter to keep the same filename
  • To quit, press CTRL + X

You can run this Python script in the shell by:

python example_script.py\n

or, if you want to explicitly use Python 3:

python3 example_script.py\n

Some features of this approach are:

  • this is a simple way to write code: it is the same as writing any text file.
  • you get no help while writing code
  • you can only run the script from start to finish, i.e. you cannot partially run the script
How to run a Python script line-by-line?

You can run a Python script line-by-line using a Python debugger, such as pdb.

On the terminal, for python, do:

pdb example_script.py\n

or for python3:

pdb3 example_script.py\n

See the official Python documentation of pdb here.

"},{"location":"software/python_programming/#use-the-python-interpreter","title":"Use the Python interpreter","text":"

After loading a Python module, you have the Python interpreter available.

Forgot how to load a Python module?

See the UPPMAX page about Python here.

What is a Python interpreter?

In computing, an interpreter is a program that reads text and runs it directly, without any additional steps.

The Python interpreter runs the Python commands you type directly, without any additional steps.

Start the Python interpreter by typing:

python\n

or (for explicit Python 3):

python3\n

The Python prompt looks like this:

>>>\n

Type, for example:

print('Hello, world!')\n

and the interpreter will run the statement.

Exit the Python interpreter with CTRL + D, quit() or exit().

The Python interpreter gives limited auto-complete while writing code.

How do I get auto-complete?

As an example, writing this line of code in the Python interpreter ...

s = 'Hello, world!'\n

... and press enter. Now a variable called s will hold some text.

Now type ...

s.\n

and press Tab twice. You will see a list of things you can do with that string.

The Python interpreter can show graphics.

How do I get the Python interpreter to show graphics?

In the Python interpreter, run this code line-by-line:

import matplotlib.pyplot as plt\nplt.plot([1, 4, 9, 16])\nplt.show()\n

(or as a one-liner: import matplotlib.pyplot as plt; plt.plot([1, 4, 9, 16]); plt.show())

You will see a window appear:

You will only see a window appear if you've logged in to Rackham with SSH with X forwarding enabled.

Spoiler: ssh -X sven@rackham.uppmax.uu.se.

The Python interpreter cannot directly run scripts.

"},{"location":"software/python_programming/#links","title":"Links","text":"
  • Official Python documentation
  • Python forum
  • Free online book: 'How to Think Like a Computer Scientist'
  • UPPMAX TensorFlow guide
  • UPPMAX PyTorch guide
"},{"location":"software/python_pyenv/","title":"Python pyenv","text":"

pyenv is one of multiple Python virtual environment managers.

This approach is more advanced and should, in our opinion, be used only if the other approaches are not enough for your purpose. Probably Conda will work well for you. The approach below allows you to install your own Python version and much more\u2026

Confer the official pyenv documentation.

","tags":["Python","pyenv"]},{"location":"software/python_pyenv/#first-time-at-uppmax","title":"First time at UPPMAX","text":"
  1. Download pyenv:

git clone https://github.com/pyenv/pyenv.git ~/.pyenv\n
  2. Make pyenv start when you login each time

echo 'export PYENV_ROOT=\"$HOME/.pyenv\"' >> ~/.bash_profile\necho 'export PATH=\"$PYENV_ROOT/bin:$PATH\"' >> ~/.bash_profile\necho 'eval \"$(pyenv init -)\"' >> ~/.bash_profile\n

To make sure everything gets loaded correctly, log out and back in to UPPMAX.

","tags":["Python","pyenv"]},{"location":"software/python_pyenv/#installing-own-python-version-not-already-available-as-an-uppmax-module","title":"Installing own python version (not already available as an UPPMAX module)","text":"
  1. Get pyenv to install the python version of your liking.

    pyenv install 3.10.6\n
  2. Make the version you just installed the standard version for every time you run python.

    pyenv global 3.10.6\n

Now you should be all set. If you change your mind about which version of Python to use, just redo this section and choose a different version. You can also have multiple versions installed at the same time and just switch between them using 'pyenv global' as shown above, if you have a script that requires Python 3.3 or any other version.
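
For example, to switch between two installed versions (the version numbers are illustrative):

pyenv install 3.3.6\npyenv global 3.3.6\n# ...and to switch back:\npyenv global 3.10.6\n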

","tags":["Python","pyenv"]},{"location":"software/python_pyenv/#install-packages-in-your-selected-python-version","title":"Install packages in your selected python version","text":"
  1. Set python version with

    pyenv global <version>\n
  2. Install packages in your python, use pip

    pip install [package name]\n

Example:

pip install mechanize\n
","tags":["Python","pyenv"]},{"location":"software/python_pyenv/#links","title":"Links","text":"
  • CodeRefinery's course: Python for Scientific Computing.
","tags":["Python","pyenv"]},{"location":"software/python_venv/","title":"Python venv","text":"

venv is one of multiple Python virtual environment managers.

venv is a Python-only environment manager and is an official Python library, with its own official Python tutorial.

flowchart TD\n  create[Create]\n  activate[Activate]\n  use[Use]\n  deactivate[Deactivate]\n\n  create --> activate\n  activate --> use\n  use --> deactivate\n  deactivate --> activate

The venv workflow

First, the common workflow for using a venv is described:

  • how to create a virtual environment
  • how to activate a virtual environment
  • how to deactivate a virtual environment

Then:

  • how to export and import a virtual environment
"},{"location":"software/python_venv/#create-a-virtual-environment","title":"Create a virtual environment","text":"

A virtual environment can be created in multiple ways, for example from scratch, although that is not recommended.

Here we discuss the recommended way to create a virtual environment, which has these steps:

  1. Load a Python module or a modules with Python packages
  2. Create the virtual environment
"},{"location":"software/python_venv/#1-load-a-python-module-or-a-modules-with-python-packages","title":"1. Load a Python module or a modules with Python packages","text":"

The first step is described at 'Loading Python' and 'Loading Python package modules'.

Just show me how to do this

Sure, here is how to load a Python module:

module load python/3.11.8\n

Here is how to load a Python package module:

module load python_ML_packages/3.11.8-cpu\n

Because you can load Python modules of different Python versions, you can create venv virtual environments with different Python versions. Consider indicating the Python version in the venv name, e.g. my_python2_venv or my_python3_venv.

"},{"location":"software/python_venv/#2-create-the-virtual-environment","title":"2. Create the virtual environment","text":"

After loading the needed Python modules, one can create a virtual environment most efficiently using:

python -m venv --system-site-packages [path]/[venv_name]\n

where [path] is the path where you want to create your venv virtual environment and [venv_name] is the name of the venv virtual environment. For example python -m venv --system-site-packages ~/my_venvs/example_venv.

Create virtual environments in your project storage

Virtual environments can take up a lot of disk space.

If you use either (1) many venv virtual environments, or (2) install many Python packages to a venv virtual environment, we strongly recommend that you create the venv virtual environments in your project (/proj/[your_uppmax_project]) folder.

The -m flag makes sure that you use the libraries from the Python version you are using. The --system-site-packages flag ensures you use the packages already installed in the loaded Python module.

How long does this step take?

This depends.

This takes around 10 seconds:

module load python/3.11.8\npython -m venv --system-site-packages ~/my_venvs/example_venv\n

This also takes around 10 seconds:

module load python_ML_packages/3.11.8-cpu\npython -m venv --system-site-packages ~/my_venvs/example_ml_venv\n
"},{"location":"software/python_venv/#activate-a-virtual-environment","title":"Activate a virtual environment","text":"

To activate your newly created virtual environment, locate the script called activate and execute it:

source [path]/[venv_name]/bin/activate\n

where [path] is the path where you created your venv virtual environment and [venv_name] is the name of the venv virtual environment. For example source ~/my_venvs/example_venv/bin/activate.

When a venv virtual environment is active, the prompt is changed to start with the name of your venv.

What does that look like?

This is how your changed prompt looks:

[sven@rackham1 ~]$ module load python_ML_packages/3.11.8-cpu\n[sven@rackham1 ~]$ python -m venv --system-site-packages ~/my_venvs/example_venv\n[sven@rackham1 ~]$ source ~/my_venvs/example_venv/bin/activate\n(example_venv) [sven@rackham1 ~]$\n

With the venv virtual environment active, you can now install and update Python packages in an isolated way.
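
For example (a sketch; pandas is just an example package):

(example_venv) [sven@rackham1 ~]$ pip install --upgrade pip\n(example_venv) [sven@rackham1 ~]$ pip install pandas\n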

"},{"location":"software/python_venv/#deactivate-a-virtual-environment","title":"Deactivate a virtual environment","text":"

To deactivate a venv virtual environment:

deactivate\n

As the venv virtual environment you just used is now inactive, the prompt will not show the name of your venv anymore.

You will need to activate a virtual environment to work with it again.

"},{"location":"software/python_venv/#export-and-import-a-virtual-environment","title":"Export and import a virtual environment","text":""},{"location":"software/python_venv/#export","title":"Export","text":"

To export the Python packages used in your virtual environment, do:

pip freeze > requirements.txt\n

This will create a file with all the Python packages and their versions, using the conventional name for such a file.

What does that file look like?

This is how a requirements.txt file may look:

anndata==0.10.5.post1\nanyio==4.2.0\nappdirs==1.4.4\nargon2-cffi==23.1.0\nargon2-cffi-bindings==21.2.0\n[more Python packages]\nwebsocket-client==1.7.0\nWerkzeug==3.0.1\nwidgetsnbextension==4.0.9\nzipp==3.17.0\nzope.interface==6.1\n

Note that [more Python packages] is a placeholder for many more Python packages.

"},{"location":"software/python_venv/#import","title":"Import","text":"
To import (that is, install) the Python packages listed in a requirements.txt file, do:

pip install -r requirements.txt\n
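
A typical round trip, re-creating an environment elsewhere, might look like this (a sketch; the module version and venv path are examples):

pip freeze > requirements.txt                        # in the old venv, while it is active\nmodule load python/3.11.8\npython -m venv --system-site-packages ~/my_venvs/new_venv\nsource ~/my_venvs/new_venv/bin/activate\npip install -r requirements.txt                      # in the new venv\n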
"},{"location":"software/python_virtual_environments/","title":"Virtual environments in Python","text":"

This page describes how to use virtual environments in Python.

"},{"location":"software/python_virtual_environments/#why-use-virtual-environments","title":"Why use virtual environments?","text":"

Virtual environments allow one to have independent Python environments.

This allows one to work on multiple projects:

  • You can install specific, also older, versions into them
  • You can create one for each project; it is no problem if two projects require different versions
  • If you make some mistake and install something you did not want or need, you can remove the environment and create a new one
"},{"location":"software/python_virtual_environments/#environment-managers","title":"Environment managers","text":"

Here is an incomplete overview of virtual environment managers that work with Python:

Virtual environment manager | Description
--- | ---
venv | Works on Rackham
virtualenv | venv for older Python versions
conda | Works on Rackham, recommended on Bianca
pyenv | More advanced than venv
"},{"location":"software/python_virtual_environments/#general-virtual-environment-manager-workflow","title":"General virtual environment manager workflow","text":"
flowchart TD\n  create[Create]\n  activate[Activate]\n  use[Use]\n  deactivate[Deactivate]\n\n  create --> activate\n  activate --> use\n  use --> deactivate\n  deactivate --> activate

Whatever virtual environment manager you use, this is the workflow:

  • You create the isolated environment
  • You activate the environment
  • You work in the isolated environment. Here you install (or update) the environment with the packages you need
  • You deactivate the environment after use

A virtual environment can be created in multiple ways, for example, from scratch. However, there are more efficient ways, such as by re-using already installed Python packages. How to do so, can be found on the page about your specific virtual environment manager.

"},{"location":"software/python_virtualenv/","title":"Python virtualenv","text":"

virtualenv is one of multiple Python virtual environment managers.

Here we show the differences between venv and virtualenv:

Parameter | venv | virtualenv
--- | --- | ---
Supports which Python versions? | Newer | Older
Is standard library? | Yes | No

Also, virtualenv has a few more minor unique features.

Because these two are so similar, most information is documented at venv.

"},{"location":"software/pytorch/","title":"PyTorch","text":""},{"location":"software/qiime2/","title":"qiime2","text":"

qiime2 is a tool for microbiome bioinformatics analysis.

qiime2 can be found among the UPPMAX modules.

module spider qiime2\n
What does that look like?

Your output will look similar to this:

[sven@rackham3 ~]$ module spider qiime2\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  qiime2:\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n     Versions:\n        qiime2/2018.11.0\n        qiime2/2024.2\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  For detailed information about a specific \"qiime2\" package (including how to load the modules) use the module's full name.\n  Note that names that have a trailing (E) are extensions provided by other modules.\n  For example:\n\n     $ module spider qiime2/2024.2\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n

To find out how to load a specific version:

module spider qiime2/2024.2\n
What does that look like?

Output will look similar to:

[sven@rackham3 ~]$ module spider qiime2/2024.2\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\nqiime2: qiime2/2024.2\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n    You will need to load all module(s) on any one of the lines below before the \"qiime2/2024.2\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n      qiime2 - use qiime2 \n\n      Description\n\n      Version 2024.2\n\n      https://qiime2.org\n\n      The version installed is 2024.2 amplicon, slightly modified from the publicly available docker image.\n\n\n         qiime ...\n\n\n      You may see a message like \n\n          Matplotlib created a temporary config/cache directory at /scratch/matplotlib-a10b2an0 because the default path (/home/qiime2/matplotlib) is not a writable directory...\n\n      This is because qiime2 is running within an Apptainer container. This message can be ignored.\n

After reading that documentation, we know how to load it:

module load bioinfo-tools \nmodule load qiime2/2024.2\n
What does that look like?

Your output will look similar to this:

[sven@rackham3 ~]$ module load bioinfo-tools \n[sven@rackham3 ~]$ module load qiime2/2024.2\n[sven@rackham3 ~]$ \n
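
Once the module is loaded, the qiime command is available. As a quick check (a sketch):

qiime --help\n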
"},{"location":"software/qiime2/#singularity-script","title":"Singularity script","text":"

If you want to put qiime2 in a Singularity container, here is an example script:

BootStrap: library\nFrom: centos:7\n\n%runscript\n  . /miniconda/etc/profile.d/conda.sh\n  PATH=$PATH:/miniconda/bin\n  conda activate qiime2-2019.7\n  qiime \"$@\"\n\n%post\n  yum clean all\n  yum -y update\n  yum -y install wget python-devel\n  cd /tmp\n  wget https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh\n  bash ./Miniconda2-latest-Linux-x86_64.sh -b -p /miniconda\n  /miniconda/bin/conda update -y conda\n  wget https://data.qiime2.org/distro/core/qiime2-2019.7-py36-linux-conda.yml\n  /miniconda/bin/conda env create -n qiime2-2019.7 --file qiime2-2019.7-py36-linux-conda.yml\n  # OPTIONAL CLEANUP\n  rm qiime2-2019.7-py36-linux-conda.yml\n  /miniconda/bin/conda clean -a\n

See the documentation on Singularity for how to do so.
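
To build and use an image from such a definition file, a sketch (the file names are examples, and building typically requires root or the --fakeroot option, often on a machine you control):

sudo singularity build qiime2.sif qiime2.def    # qiime2.def holds the definition above\n./qiime2.sif info                               # the %runscript passes 'info' on to qiime\n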

"},{"location":"software/r/","title":"R","text":"

R is a programming language for statistical computing and data visualization (from Wikipedia).

Here we discuss:

  • the R programming language
  • the R interpreter
  • R packages
  • R software development
  • How to install personal packages
  • How to create a Singularity container for an R package
flowchart TD\n\n    subgraph r[R]\n      r_interpreter[the R interpreter]\n      r_packages[R packages]\n      r_language[the R programming language]\n      r_dev[R software development]\n      rstudio[RStudio]\n\n      interpreted_language[Interpreted]\n      cran[CRAN]\n    end\n\n    subgraph uppmax_modules[UPPMAX modules]\n      r_module[R]\n      r_packages_module[R_packages]\n      rstudio_module[RStudio]\n    end\n\n\n    r_language --> |has| r_dev\n    r_language --> |is| interpreted_language\n    r_language --> |uses| r_packages\n    interpreted_language --> |done by| r_interpreter\n    r_packages --> |maintained by| cran\n    r_dev --> |commonly done in| rstudio\n\n    r_interpreter --> r_module\n    r_packages --> r_packages_module\n    rstudio --> rstudio_module\n\n    rstudio_module --> |automatically loads latest| r_packages_module\n    r_packages_module --> |automatically loads corresponding version of| r_module
"},{"location":"software/r/#the-r-programming-language","title":"the R programming language","text":"

R is 'a programming language for statistical computing and data visualization' and is one of the most commonly used programming languages in data mining, analysis and visualization.

R is an interpreted language; users can access it through the R interpreter.

R is a dynamically typed programming language whose basic built-in data structures include vectors, arrays, lists, and data frames. It supports both procedural programming and object-oriented programming.

R has many user-created R packages to augment the functions of the R language, most commonly hosted on CRAN. These packages offer statistical techniques, graphical devices, import/export, reporting (RMarkdown, knitr, Sweave), etc.

"},{"location":"software/r/#the-r-interpreter","title":"the R interpreter","text":"

The R interpreter is the program that reads R code and runs it. Commonly, 'the programming language R' and 'the R interpreter' are used as synonyms.

To load the latest version of the R interpreter, load the R module version 4.3.1 like this:

module load R/4.3.1\n
Do I really need to load an R module?

We strongly recommend loading an R module.

If you do not load an R module, you will be using the version of R used by the UPPMAX systems.

Sometimes that may work.

If not, load an R module.

Need a different version?

If you need a different R version, use the following command to see which versions of the R interpreter are installed on UPPMAX:

module spider R\n

Then start the R interpreter with:

R\n
"},{"location":"software/r/#r-packages","title":"R packages","text":"

R packages extend what R can do. The most common repository for R packages is CRAN. As these packages are so common, UPPMAX provides most CRAN packages in one module, called R_packages.

To load the latest version of the pre-installed R packages, do:

module load R_packages/4.3.1\n

This will automatically load the corresponding version of the R interpreter.

Do I really need to load the R_packages module?

We strongly recommend loading the R_packages module.

If you do not load the R_packages module (nor the R module), you will be using the version of R used by the UPPMAX systems.

Sometimes that may work.

If not, load the R_packages module.

Need a different version?

If you need a different package version, use the following command to see which versions of the R packages are installed on UPPMAX:

module spider R_packages\n
"},{"location":"software/r/#r-software-development","title":"R software development","text":"

RStudio in action on Bianca using the remote desktop environment

Software development is commonly done in a so-called Integrated Development Environment, abbreviated 'IDE'.

RStudio is the most commonly used IDE for R software development. See the UPPMAX page about RStudio on how to use it.

"},{"location":"software/r/#how-to-install-personal-packages","title":"How to install personal packages","text":"

Installing R packages on Bianca

  • If a package is unavailable on Bianca, one can create a Singularity container for R packages
  • The alternative is found here

First load R_packages and check that the package is not already installed!

To install personal packages in your own home directory you type

install.packages(\"package_name\")\n

as usual. That will install all your packages under the path ~/R/[arch]/[version of R]/. Then you can load it by just doing library(package_name) or require(package_name) in the R environment.

You can also specify a specific folder for where to put your packages, with

install.packages(\"package_name\", lib=\"~/some/path/under/your/home/directory/\")\n

But to then be able to find the package inside the R environment you need to either export the R_LIBS_USER environment variable, or specify the flag lib.loc when calling require/library, e.g.

library(package_name, lib.loc='~/some/path/under/your/home/directory')\n

Notice that if you are planning on running R on different clusters then it is probably wisest to manually specify the installation directory, and to have separate directories for each cluster. This is because some of the clusters have different architectures, and this will render some packages unusable if you compile them on one system but try to run them on the other.
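
For example, a sketch of cluster-specific personal libraries (the directory names are hypothetical, and glmnet is just an example package):

module load R_packages/4.3.1\nmkdir -p ~/R-packages-rackham    # a separate personal library per cluster\nR --quiet --no-save -e \"install.packages('glmnet', lib='~/R-packages-rackham')\"\nR --quiet --no-save -e \"library(glmnet, lib.loc='~/R-packages-rackham')\"\n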

"},{"location":"software/r/#technicalities","title":"Technicalities","text":"

As of this writing, our most recent installations are

  • R/4.3.1
  • R_packages/4.3.1
  • RStudio/2023.06.2-561

If you need an older version, do module avail R or R_packages or RStudio to see older versions as well.

Note that R_packages/4.3.1 contains 23475 packages, nearly all packages available on CRAN and BioConductor, as well as several custom packages installed from Github and other repositories. See module help R_packages/4.3.1 and R_packages for more information.

"},{"location":"software/r/#what-r-packages-are-in-the-omnibus-r_packages-modules","title":"What R packages are in the omnibus R_packages modules?","text":""},{"location":"software/r/#r_packages411","title":"R_PACKAGES/4.1.1","text":"

As of 2021-11-11 there are a total of 21659 R packages installed. A total of 21740 packages are available in CRAN and BioConductor. 18022 CRAN packages are installed, out of 18348 available. 3382 BioConductor-specific packages are installed, out of 3392 available. 255 other R packages are installed. These are not in CRAN/BioConductor, and instead are hosted on github or elsewhere.

These R packages are available as part of the R_packages/4.1.1 module as installed on rackham, bianca and snowy, which requires and loads the R/4.1.1 module. When the R_packages/4.1.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

  • To use some R packages from this module, other modules may need to be loaded. For example, to use the Rmpi package, the openmpi/3.1.5 module must be loaded after loading R_packages/4.1.1, as sketched below.
  • See module help R_packages/4.1.1 for more information.
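
Putting that note into practice, a sketch:

module load R_packages/4.1.1\nmodule load openmpi/3.1.5    # needed before the Rmpi package can be used\nR --quiet --no-save -e \"library(Rmpi)\"\n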
"},{"location":"software/r/#r_packages404","title":"R_PACKAGES/4.0.4","text":"

As of 2021-04-16 there are a total of 20663 CRAN and BioConductor packages installed, out of 20751 packages available. 17354 CRAN packages are installed, out of 17428 available. 3309 BioConductor-specific packages are installed, out of 3323 available.

These R packages are available as part of the R_packages/4.0.4 module as installed on rackham, bianca and snowy, which requires and loads the R/4.0.4 module. When the R_packages/4.0.4 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

  • To use some R packages from this module, other modules may need to be loaded. For example, to use the Rmpi package, the openmpi/3.1.5 module must be loaded after loading R_packages/4.0.4.
  • See module help R_packages/4.0.4 for more information.
"},{"location":"software/r/#r_packages400","title":"R_PACKAGES/4.0.0","text":"

As of 2021-02-24 there are a total of 18652 CRAN and BioConductor packages installed, out of 20422 packages available. 14839 CRAN packages are installed, out of 17165 available. 3217 BioConductor-specific packages are installed, out of 3257 available.

These R packages are available as part of the R_packages/4.0.0 module as installed on rackham, bianca and snowy, which requires and loads the R/4.0.0 module. When the R_packages/4.0.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

See module help R_packages/4.0.0 for more information.

"},{"location":"software/r/#r_packages361","title":"R_PACKAGES/3.6.1","text":"

As of 2019-09-18 there are a total of 17657 packages available in this module. This includes 14579 CRAN packages installed, out of 14913 available; and 3054 BioConductor-specific packages installed, out of 3079 available. These R packages are available as part of the R_packages/3.6.1 module as installed on rackham, bianca and snowy, which requires and loads the R/3.6.1 module. When the R_packages/3.6.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

See module help R_packages/3.6.1 for more information.

"},{"location":"software/r/#r_packages360","title":"R_PACKAGES/3.6.0","text":"

As of 2019-05-14 there are a total of 17257 packages available. This includes 13769 CRAN packages installed, out of 14178 available; and 3031 BioConductor-specific packages installed, out of 3079 available. These R packages are available as part of the R_packages/3.6.0 module as installed on rackham, bianca and snowy, which requires and loads the R/3.6.0 module. When the R_packages/3.6.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

See module help R_packages/3.6.0 for more information.

"},{"location":"software/r/#r_packages352","title":"R_PACKAGES/3.5.2","text":"

As of 2019-02-08 there are a total of 16642 packages available. This includes 13355 CRAN packages installed, out of 13683 available; and 2933 BioConductor-specific packages installed, out of 2959 available. These R packages are available as part of the R_packages/3.5.2 module as installed on rackham, bianca and snowy, which requires and loads the R/3.5.2 module. When the R_packages/3.5.2 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

See module help R_packages/3.5.2 for more information.

"},{"location":"software/r/#r_packages350","title":"R_PACKAGES/3.5.0","text":"

With its 3.5.0 version, R_packages now attempts to install all available R packages from both CRAN and BioConductor.

As of 2018-06-26 there are a total of 14532 packages available. This includes 11734 CRAN packages installed, out of 12867 available; and 2798 BioConductor-specific packages installed, out of 2843 available. These R packages are available as part of the R_packages/3.5.0 module as installed on rackham, bianca and snowy, which requires and loads the R/3.5.0 module. When the R_packages/3.5.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

See module help R_packages/3.5.0 for more information.

"},{"location":"software/r/#r_packages343","title":"R_packages/3.4.3","text":"

A large number of R packages are available as part of the R_packages/3.4.3 module as installed on rackham and bianca, which requires and loads the R/3.4.3 module. When the R_packages/3.4.3 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

"},{"location":"software/r/#r_packages340","title":"R_packages/3.4.0","text":"

A large number of R packages are available as part of the R_packages/3.4.0 module, which requires and loads the R/3.4.0 module. When the R_packages/3.4.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

"},{"location":"software/r/#r_packages332","title":"R_packages/3.3.2","text":"

A large number of R packages are available as part of the R_packages/3.3.2 module, which requires and loads the R/3.3.2 module. When the R_packages/3.3.2 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

"},{"location":"software/r/#r_packages331","title":"R_packages/3.3.1","text":"

A large number of R packages are available as part of the R_packages/3.3.1 module, which requires and loads the R/3.3.1 module. When the R_packages/3.3.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these should be available via library(package-name).

"},{"location":"software/r/#r_packages330","title":"R_packages/3.3.0","text":"

A large number of R packages are available as part of the R_packages/3.3.0 module, which requires and loads the R/3.3.0 module. When the R_packages/3.3.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these should be available via library(package-name).

"},{"location":"software/r/#learning-r","title":"Learning R","text":""},{"location":"software/r/#starter-r-courses","title":"Starter R courses","text":"

The Carpentries teaches basic lab skills for research computing, such as:

  • Programming with R
  • R for reproducible scientific analysis
"},{"location":"software/r/#experienced-r-courses","title":"Experienced R courses","text":"

CodeRefinery develops and maintains training material on software best practices for researchers that already write code. Their material addresses all academic disciplines and tries to be as programming language-independent as possible:

  • CodeRefinery lessons

Aalto Scientific Computing:

  • Data analysis workflows with R and Python
"},{"location":"software/r/#overview-of-naiss-centers-and-their-documentation-about-r","title":"Overview of NAISS centers and their documentation about R","text":"
  • C3SE
  • HPC2N
  • LUNARC
  • NSC
  • PDC
  • UPPMAX
"},{"location":"software/r/#links","title":"Links","text":"
  • The home page of \"The R Project for Statistical Computing\"
  • Official R documentation
  • CRAN homepage
  • CRAN mirrors
"},{"location":"software/r_packages_bianca/","title":"Installing R packages on Bianca","text":"Read through the content below Try to do the exercise"},{"location":"software/r_packages_bianca/#first-check-if-package-is-already-in-r_packagesxyz","title":"First check if package is already in R_packages/x.y.z","text":"
  • On UPPMAX the module R_packages is an omnibus package library containing almost all packages in the CRAN and BioConductor repositories.
    • As of 2023-11-21, there were a total of 23478 R packages installed in R_packages/4.3.1.
      • A total of 23603 packages are available in CRAN and BioConductor
      • 19586 CRAN packages are installed, out of 20044 available
      • 3544 BioConductor-specific packages are installed, out of 3559 available
      • 346 other R packages are installed. These are not in CRAN/BioConductor, are only available in the CRAN/BioConductor archives, or are hosted on GitHub, GitLab or elsewhere

Chances are good the R packages you need are already available once you load this module. You can quickly check by loading it:

$ ml R_packages/4.3.1

Then within R, try loading the package you want:

library(glmnet)

Alternatively, and this is both a longer solution and not our recommended one, you can grep for the package after this module is loaded using the environment variable $R_LIBS_SITE, which contains the locations of all R packages installed within the module.

$ ls -l $R_LIBS_SITE | grep glmnet\ndrwxrwsr-x  9 douglas sw  4096 May 28 16:59 EBglmnet\ndrwxrwsr-x 11 douglas sw  4096 May 25 01:22 glmnet\ndrwxrwsr-x  6 douglas sw  4096 May 25 04:03 glmnetSE\ndrwxrwsr-x  7 douglas sw  4096 May 25 04:04 glmnetUtils\ndrwxrwsr-x  8 douglas sw  4096 May 25 04:04 glmnetcr\ndrwxrwsr-x  7 douglas sw  4096 May 25 10:46 glmnetr\n
"},{"location":"software/r_packages_bianca/#install-steps","title":"Install steps","text":""},{"location":"software/r_packages_bianca/#install-on-rackham","title":"Install on Rackham","text":"
  • R on UPPMAX course
  • Note: first decide which R version it should be based on, and load that R_packages module.
  • If not stated otherwise, your installation will end up in the ~/R directory within your home directory
"},{"location":"software/r_packages_bianca/#methods","title":"Methods","text":"
  • automatic download and install from CRAN

    • https://uppmax.github.io/bianca_workshops/extra/rpackages_copy/#automatic-download-and-install-from-cran
  • automatic download and install from GitHub

    • https://uppmax.github.io/bianca_workshops/extra/rpackages_copy/#automatic-download-and-install-from-github
  • manual download and install

    • https://uppmax.github.io/bianca_workshops/extra/rpackages_copy/#manual-download-and-install
    • NOTE that if you install a package this way, you need to handle any dependencies yourself.
      • For instance, you might make use of our modules
"},{"location":"software/r_packages_bianca/#transfer-to-wharf","title":"Transfer to wharf","text":"
  • You may transfer the whole R library (in your home folder)
    • this is usually the easiest way
  • or select the directory(-ies) related to your new installation
    • note there may be more than one directory
"},{"location":"software/r_packages_bianca/#move-package-to-local-bianca-r-package-path","title":"Move package to local Bianca R package path","text":"
  • Sync or move the R directory or the specific folders to your ~/R directory on bianca
"},{"location":"software/r_packages_bianca/#test-your-installation","title":"Test your installation","text":"
  • Start an R session on bianca and load the new package
"},{"location":"software/r_packages_bianca/#example-update-dowser","title":"Example: Update dowser","text":"

dowser on ReadTheDocs

Info

  • Dowser is part of the Immcantation analysis framework for Adaptive Immune Receptor Repertoire sequencing (AIRR-seq).
  • Dowser provides a set of tools for performing phylogenetic analysis on B cell receptor repertoires.
  • It supports building and visualizing trees using multiple methods, and implements statistical tests for discrete trait analysis of B cell migration, differentiation, and isotype switching.

The version of dowser in R_packages/4.2.1 is 1.1.0. It was updated to version 1.2.0 on 2023-05-30.

"},{"location":"software/r_packages_bianca/#install-dowser-rackham","title":"Install dowser Rackham","text":"

You can update this for yourself by beginning on rackham. Do

module load R_packages/4.2.1\n

and then, within R, do

install.packages('dowser')\n

The install.packages() command that you use to install new packages is also used to update already installed packages.

As the update begins, you will see two questions, answer yes to both:

Warning in install.packages(\"dowser\") :\n      'lib = \"/sw/apps/R_packages/4.2.1/rackham\"' is not writable\n    Would you like to use a personal library instead? (yes/No/cancel) yes\n

and

Would you like to create a personal library\n    '~/R/x86_64-pc-linux-gnu-library/4.2'\n    to install packages into? (yes/No/cancel) yes\n

If you have already installed or updated an R package with R_packages/4.2.1 loaded that resulted in creating a personal library, you may not see one or both of these questions.

This will then lead to a brief installation process. This creates the directory ~/R/x86_64-pc-linux-gnu-library/4.2 that it refers to in the question. This directory contains your personal installations and updates of R packages.

The complete installation output for this update on rackham was:

> packageVersion('dowser')\n[1] '1.1.0'\n> install.packages('dowser')\nInstalling package into '/sw/apps/R_packages/4.2.1/rackham'\n(as 'lib' is unspecified)\nWarning in install.packages(\"dowser\") :\n  'lib = \"/sw/apps/R_packages/4.2.1/rackham\"' is not writable\nWould you like to use a personal library instead? (yes/No/cancel) yes\nWould you like to create a personal library\n'/domus/h1/douglas/R/x86_64-pc-linux-gnu-library/4.2'\nto install packages into? (yes/No/cancel) yes\n--- Please select a CRAN mirror for use in this session ---\ntrying URL 'https://ftp.acc.umu.se/mirror/CRAN/src/contrib/dowser_1.2.0.tar.gz'\nContent type 'application/x-gzip' length 1722229 bytes (1.6 MB)\n==================================================\ndownloaded 1.6 MB\n\n* installing *source* package 'dowser' ...\n** package 'dowser' successfully unpacked and MD5 sums checked\n** using staged installation\n** R\n** data\n*** moving datasets to lazyload DB\n** inst\n** byte-compile and prepare package for lazy loading\n** help\n*** installing help indices\n** building package indices\n** installing vignettes\n** testing if installed package can be loaded from temporary location\n** testing if installed package can be loaded from final location\n** testing if installed package keeps a record of temporary installation path\n* DONE (dowser)\n\nThe downloaded source packages are in\n    '/scratch/RtmpRo0Gz5/downloaded_packages'\n>\n> packageVersion('dowser')\n[1] '1.2.0'\n
"},{"location":"software/r_packages_bianca/#transfer-to-the-wharf","title":"Transfer to the Wharf","text":"

After installation, the next step is to copy the contents of this directory over to bianca so that it is the same directory within your bianca home directory.

Make sure you are in your home directory. Then connect to the bianca wharf. Replace the name and project with your bianca user name and project.

sftp douglas-sens2017625@bianca-sftp\n

You log in here like you log into bianca: the first password is your password followed by the 6-digit authenticator code, the second password (if required for you) is only your password.

Once sftp has connected, the contents of the current directory can be listed with

dir\n

It should look like this:

sftp> dir\ndouglas-sens2017625\n

Now cd to this directory, which is your wharf directory within your project.

sftp> cd douglas-sens2017625/\nsftp> dir\nsftp>\n

If you have not uploaded anything to your wharf, this will be empty. It might have a few things in it.

Now, upload your entire personal R directory from rackham here.

sftp> put -r R\n

This will take a while to upload all the files. When it has completed, quit.

sftp> quit\n
  • Now, log into bianca using the shell, or using the web interface and start a terminal.
  • Once you have a bianca shell, change to your wharf directory within your project. Replace my user and project with yours.
cd /proj/sens2017625/nobackup/wharf/douglas/douglas-sens2017625\n

Within this directory should be your R directory.

[douglas@sens2017625-bianca douglas-sens2017625]$ ls -l\ntotal 1892\ndrwxrwxr-x  3 douglas douglas    4096 Mar  2 14:27 R\n
"},{"location":"software/r_packages_bianca/#sync-from-wharf-to-home-directory","title":"Sync from Wharf to Home directory","text":"
  • Now sync this to your home directory:
[douglas@sens2017625-bianca douglas-sens2017625]$ rsync -Pa R ~/\n
"},{"location":"software/r_packages_bianca/#start-an-r-session-and-load-the-new-package","title":"Start an R session and load the new package","text":"

Because R_packages/4.2.1 was loaded when you installed/updated the packages in your personal R library, you need to have it loaded when you use these packages as well.

Simply change to the directory you want to work in, load the R_packages/4.2.1 module, and get to work.

[douglas@sens2017625-bianca douglas-sens2017625]$ cd /proj/sens2017625/nobackup/douglas/\n    [douglas@sens2017625-bianca douglas]$ module load R_packages/4.2.1\n

Then start R, and load the new package.

[douglas@sens2017625-bianca douglas]$ R\n
    > packageVersion('dowser')\n    [1] '1.2.0'\n    > library(dowser)\n    >\n
"},{"location":"software/r_packages_bianca2/","title":"Installing R packages on Bianca","text":"

R on UPPMAX course

"},{"location":"software/r_packages_bianca2/#what-is-a-package-really","title":"What is a package, really?","text":"
  • An R package is essentially a contained folder and file structure containing R code (and possibly C/C++ or other code) and other files relevant for the package, e.g. documentation (vignettes), licensing and configuration files.

  • Let us look at a very simple example

   $ git clone git@github.com:MatPiq/R_example.git\n\n   $ cd R_example\n\n   $ tree\n   .\n   \u251c\u2500\u2500 DESCRIPTION\n   \u251c\u2500\u2500 NAMESPACE\n   \u251c\u2500\u2500 R\n   \u2502   \u2514\u2500\u2500 hello.R\n   \u251c\u2500\u2500 man\n   \u2502   \u2514\u2500\u2500 hello.Rd\n   \u2514\u2500\u2500 r_example.Rproj\n
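
Such a source tree can be built into the tarball that a manual install expects; a sketch (the produced file name depends on the DESCRIPTION file and is hypothetical here):

R CMD build R_example                 # produces e.g. rexample_0.1.0.tar.gz\nR CMD INSTALL rexample_0.1.0.tar.gz   # install it into your personal library\n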
"},{"location":"software/r_packages_bianca2/#installing-your-own-packages","title":"Installing your own packages","text":"

Sometimes you will need R packages that are not already installed. The solution to this is to install your own packages.

  • These packages will usually come from CRAN - the Comprehensive R Archive Network, or

  • sometimes from other places, like GitHub or R-Forge

Here we will look at installing R packages with automatic download and with manual download. It is also possible to install from inside RStudio.

"},{"location":"software/r_packages_bianca2/#methods","title":"Methods","text":"
  • setup (first time)
  • automatic download and install from CRAN
  • automatic download and install from GitHub
  • manual download and install
"},{"location":"software/r_packages_bianca2/#setup-first-time","title":"setup (first time)","text":"

https://uppmax.github.io/bianca_workshops/extra/rpackages/#setup

  • We need to create a place for the self-installed packages and tell R where to find them. The initial setup only needs to be done once, but separate package directories need to be created for each R version used.

  • R reads the $HOME/.Renviron file to setup its environment. It should be created by R on first run, or you can create it with the command: touch $HOME/.Renviron

NOTE: In this example we are going to assume you have chosen to place the R packages in a directory under your home directory. As mentioned, you will need separate ones for each R version.

If you have not yet installed any packages to R yourself, the environment file should be empty and you can update it like this:

    echo R_LIBS_USER=\\\"$HOME/R-packages-%V\\\" > ~/.Renviron  \n

If it is not empty, you can edit $HOME/.Renviron with your favorite editor so that R_LIBS_USER contain the path to your chosen directory for own-installed R packages. It should look something like this when you are done:

    R_LIBS_USER=\"/home/u/user/R-packages-%V\"  \n

NOTE: Replace /home/u/user with the value of $HOME. Run echo $HOME to see its value.

NOTE: The %V should be written as-is; it is substituted at runtime with the active R version.

For each version of R you are using, create a directory matching the pattern used in .Renviron to store your packages in. This example is shown for R version 4.0.4:

    mkdir -p $HOME/R-packages-4.0.4  \n
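
To check that R picks up your personal library after this setup, a sketch:

module load R/4.0.4\nR --quiet --no-save -e \".libPaths()\"    # your R-packages-4.0.4 directory should be listed\n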
"},{"location":"software/r_packages_bianca2/#automatic-download-and-install-from-cran","title":"Automatic download and install from CRAN","text":"

https://uppmax.github.io/bianca_workshops/extra/rpackages/#automatic-download-and-install-from-cran

Note

You find a list of packages in CRAN and a list of repos here: https://cran.r-project.org/mirrors.html

  • Please choose a location close to you when picking a repo.
From the command line:

R --quiet --no-save --no-restore -e \"install.packages('<r-package>', repos='<repo>')\"  \n

From inside R:

install.packages('<r-package>', repos='<repo>')  \n

In either case, the dependencies of the package will be downloaded and installed as well.
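
For example, a concrete sketch using a Swedish mirror from the CRAN mirror list (glmnet is just an example package):

R --quiet --no-save --no-restore -e \"install.packages('glmnet', repos='https://ftp.acc.umu.se/mirror/CRAN/')\"\n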

"},{"location":"software/r_packages_bianca2/#automatic-download-and-install-from-github","title":"Automatic download and install from GitHub","text":"

https://uppmax.github.io/bianca_workshops/extra/rpackages/#automatic-download-and-install-from-github

If you want to install a package that is not on CRAN, but which does have a GitHub page, then there is an automatic way of installing, but you need to handle prerequisites yourself by installing those first.

  • It can also be that the package is not in as finished a state as those on CRAN, so be careful.

Note

To install packages from GitHub directly, from inside R, you first need to install the devtools package. Note that you only need to install this once.

This is how you install a package from GitHub, inside R:

    install.packages(\"devtools\")   # ONLY ONCE\n    devtools::install_github(\"DeveloperName/package\")\n
"},{"location":"software/r_packages_bianca2/#manual-download-and-install","title":"Manual download and install","text":"

https://uppmax.github.io/bianca_workshops/extra/rpackages/#manual-download-and-install

If the package is not on CRAN, or you want the development version, or you for some other reason want to install a package you downloaded, then this is how to install from the command line:

    R CMD INSTALL -l <path-to-your-R-library> <path-to-R-package>/R-package.tar.gz\n

NOTE that if you install a package this way, you need to handle any dependencies yourself.
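
For example, a sketch with a hypothetical downloaded tarball and library path:

R CMD INSTALL -l ~/R-packages-4.0.4 ~/downloads/somepackage_1.0.tar.gz\n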

Note

Places to look for R packages

  • CRAN
  • R-Forge
  • Project's own GitHub page
  • etc.
"},{"location":"software/r_packages_bianca2/#example-install-tidycmprsk","title":"Example \u2014 Install Tidycmprsk","text":"

tidycmprsk on GitHub

Info

The tidycmprsk package provides an intuitive interface for working with competing risk endpoints. The package wraps the cmprsk package, and exports functions for univariate cumulative incidence estimates with cuminc() and competing risk regression with crr().

"},{"location":"software/r_packages_bianca2/#install-on-rackham","title":"Install on Rackham","text":"

You can install this for yourself by beginning on rackham. Do

module load R_packages/4.1.1\n

and then, within R, do

install.packages('tidycmprsk')\n

You will see two questions to answer yes to:

Warning in install.packages(\"tidycmprsk\") :\n      'lib = \"/sw/apps/R_packages/4.1.1/rackham\"' is not writable\n    Would you like to use a personal library instead? (yes/No/cancel) yes\n

and

Would you like to create a personal library\n    '~/R/x86_64-pc-linux-gnu-library/4.1'\n    to install packages into? (yes/No/cancel) yes\n

This will then lead to an extended installation process that also does some updates. This creates a directory ~/R that contains the installations and updates of R packages.

"},{"location":"software/r_packages_bianca2/#transfer-to-the-wharf","title":"Transfer to the Wharf","text":"

After installation, the next step is to copy the contents of this directory over to bianca so that it is the same directory within your bianca home directory.

Make sure you are in your home directory. Then connect to the bianca wharf. Replace the name and project with your bianca user name and project.

sftp douglas-sens2017625@bianca-sftp\n

You log in here like you log into bianca: the first password is your password followed by the 6-digit authenticator code, the second password (if required for you) is only your password.

Once sftp has connected, the contents of the current directory can be listed with

dir\n

It should look like this:

sftp> dir\ndouglas-sens2017625\n

Now cd to this directory, which is your wharf directory within your project.

sftp> cd douglas-sens2017625/\nsftp> dir\nsftp>\n

If you have not uploaded anything to your wharf, this will be empty. It might have a few things in it.

Now, upload your (whole) R directory here.

sftp> put -r R\n

This will take a while to upload all the files. When it has completed, quit.

sftp> quit\n
  • Now, log into bianca using the shell, or using the web interface and start a terminal.
  • Once you have a bianca shell, change to your wharf directory within your project. Replace my user and project with yours.
cd /proj/sens2017625/nobackup/wharf/douglas/douglas-sens2017625\n

Within this directory should be your R directory.

[douglas@sens2017625-bianca douglas-sens2017625]$ ls -l\ntotal 1892\ndrwxrwxr-x  3 douglas douglas    4096 Mar  2 14:27 R\n
"},{"location":"software/r_packages_bianca2/#sync-from-wharf-to-home-directory","title":"Sync from Wharf to Home directory","text":"
  • Now sync this to your home directory:
[douglas@sens2017625-bianca douglas-sens2017625]$ rsync -Pa R ~/\n
"},{"location":"software/r_packages_bianca2/#start-an-r-session-and-load-the-new-package","title":"Start an R session and load the new package","text":"

To use R_packages/4.1.1 with these new installations/updates, change to the directory you want to work in, load the R_packages/4.1.1 module. Substitute your directory for my example directory.

[douglas@sens2017625-bianca douglas-sens2017625]$ cd /proj/sens2017625/nobackup/douglas/\n    [douglas@sens2017625-bianca douglas]$ module load R_packages/4.1.1\n

Then start R, and load the new package.

[douglas@sens2017625-bianca douglas]$ R\n
    R version 4.1.1 (2021-08-10) -- \"Kick Things\"\n    Copyright (C) 2021 The R Foundation for Statistical Computing\n    ....\n    Type 'demo()' for some demos, 'help()' for on-line help, or\n    'help.start()' for an HTML browser interface to help.\n    Type 'q()' to quit R.\n\n    > library(tidycmprsk)\n    >\n
"},{"location":"software/rackham_file_transfer_using_filezilla/","title":"File transfer to/from Rackham using FileZilla","text":"

There are multiple ways to transfer data to/from Rackham.

Here, we show how to transfer files using a graphical tool called FileZilla.

FileZilla connected to Rackham

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#procedure","title":"Procedure","text":"

The FileZilla logo

Would you like a video?

If you would like to see how to do file transfer from/to Rackham using FileZilla, watch the video here

FileZilla is a secure file transfer tool that works under Linux, Mac and Windows.

To transfer files to/from Rackham using FileZilla, do the following steps:

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#1-start-filezilla","title":"1. Start FileZilla","text":"

Start FileZilla.

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#2-start-filezillas-site-manager","title":"2. Start FileZilla's site manager","text":"

From the menu, select 'File | Site manager'

Where is that?

It is here:

The FileZilla 'File' menu contains the item 'Site manager'

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#3-add-a-new-site-in-filezillas-site-manager","title":"3. Add a new site in FileZilla's site manager","text":"

In FileZilla's site manager, click 'New site'

Where is that?

It is here:

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#4-setup-the-site","title":"4. Setup the site","text":"

In FileZilla's site manager:

  • create a name for the site, e.g. rackham.
  • for that site, use all standards, except:
    • Set protocol to 'SFTP - SSH File Transfer Protocol'
    • Set host to rackham.uppmax.uu.se
    • Set user to [username], e.g. sven
What does that look like?

It looks similar to this:

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#5-connect-to-the-site","title":"5. Connect to the site","text":"

Click 'Connect'.

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#6-fill-in-your-password","title":"6. Fill in your password","text":"

You will be asked for your password, hence type [your password], e.g. VerySecret. You can save the password.

What does that look like?

It looks similar to this:

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_filezilla/#7-ready-to-transfer-files","title":"7. Ready to transfer files","text":"

Now you can transfer files between your local computer and Rackham.

What does that look like?

It looks like this:

","tags":["FileZilla","Rackham"]},{"location":"software/rackham_file_transfer_using_scp/","title":"Data transfer to/from Rackham using SCP","text":"

There are multiple ways to transfer files to or from Rackham.

Here it is described how to do file transfer to/from Rackham using SCP. SCP is an abbreviation of 'Secure Copy Protocol'; however, it is no longer considered secure and is regarded as an outdated protocol. The program scp allows you to transfer files to/from Rackham using SCP, by copying them between your local computer and Rackham.

","tags":["transfer","data transfer","file transfer","scp","SCP","Rackham"]},{"location":"software/rackham_file_transfer_using_scp/#procedure","title":"Procedure","text":"Prefer a video?

See this procedure as a video at YouTube

","tags":["transfer","data transfer","file transfer","scp","SCP","Rackham"]},{"location":"software/rackham_file_transfer_using_scp/#1-start-a-terminal-on-your-local-computer","title":"1. Start a terminal on your local computer","text":"

Start a terminal on your local computer

","tags":["transfer","data transfer","file transfer","scp","SCP","Rackham"]},{"location":"software/rackham_file_transfer_using_scp/#2-copy-files-using-scp","title":"2. Copy files using scp","text":"

In the terminal, copy files using scp to connect to Rackham:

scp [from] [to]\n

Where [from] is the file(s) you want to copy, and [to] is the destination. This is quite a shorthand notation!

This is how you copy a file from your local computer to Rackham:

scp [local_filename] [username]@rackham.uppmax.uu.se:/home/[username]\n

where [local_filename] is the path to a local filename, and [username] is your UPPMAX username, for example:

scp my_file.txt sven@rackham.uppmax.uu.se:/home/sven\n

To copy a file from Rackham to your local computer, do the command above in reverse order:

scp [username]@rackham.uppmax.uu.se:/home/[username]/[remote_filename] [local_folder]\n

where [remote_filename] is the path to a remote filename, [username] is your UPPMAX username, and [local_folder] is your local folder, for example:

scp sven@rackham.uppmax.uu.se:/home/sven/my_remote_file.txt /home/sven\n
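
To copy a whole folder instead of a single file, scp's -r flag can be used, for example (the folder name is an example):

scp -r my_project_folder sven@rackham.uppmax.uu.se:/home/sven\n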
","tags":["transfer","data transfer","file transfer","scp","SCP","Rackham"]},{"location":"software/rackham_file_transfer_using_scp/#3-if-asked-give-your-uppmax-password","title":"3. If asked, give your UPPMAX password","text":"

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys

","tags":["transfer","data transfer","file transfer","scp","SCP","Rackham"]},{"location":"software/rackham_file_transfer_using_sftp/","title":"Data transfer to/from Rackham using SFTP","text":"

Data transfer to/from Rackham using SFTP is one of the ways to transfer files to/from Rackham

What are the other ways?

Other ways to transfer data to/from Rackham are described here

One can transfer files to/from Rackham using SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using SFTP.

The process is described here:

"},{"location":"software/rackham_file_transfer_using_sftp/#step-1-start-a-terminal-on-your-local-computer","title":"Step 1. Start a terminal on your local computer","text":"

Start a terminal on your local computer.

"},{"location":"software/rackham_file_transfer_using_sftp/#step-2-run-sftp-to-connect-to-rackham","title":"Step 2. Run sftp to connect to Rackham","text":"

In the terminal, run sftp to connect to Rackham by doing:

sftp [username]@rackham.uppmax.uu.se\n

where [username] is your UPPMAX username, for example:

sftp sven@rackham.uppmax.uu.se\n
"},{"location":"software/rackham_file_transfer_using_sftp/#step-3-if-asked-give-your-uppmax-password","title":"Step 3. If asked, give your UPPMAX password","text":"

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys

"},{"location":"software/rackham_file_transfer_using_sftp/#step-4-uploaddownload-files-tofrom-rackham","title":"Step 4. Upload/download files to/from Rackham","text":"

In sftp, upload/download files to/from Rackham.

Basic sftp commands can be found here.
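
A typical session might look like this (a sketch; the file names are examples):

sftp sven@rackham.uppmax.uu.se\nsftp> put my_local_file.txt     # upload a file to Rackham\nsftp> get my_remote_file.txt    # download a file from Rackham\nsftp> quit\n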

flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n\n    user(User)\n      user_local_files(Files on user computer):::file_node\n\n    subgraph sub_inside[SUNET]\n      subgraph sub_rackham_shared_env[Rackham]\n          login_node(login/calculation/interactive node):::calculation_node\n          files_in_rackham_home(Files in Rackham home folder):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#fcc,color:#000,stroke:#fcc\n    style sub_rackham_shared_env fill:#ffc,color:#000,stroke:#ffc\n\n    user --> |logs in |login_node\n    user --> |uses| user_local_files\n\n    login_node --> |can use|files_in_rackham_home\n    %% user_local_files <--> |graphical tool|files_in_rackham_home\n    %% user_local_files <--> |SCP|files_in_rackham_home\n    user_local_files <==> |SFTP|files_in_rackham_home\n\n    %% Aligns nodes prettier\n    user_local_files ~~~ login_node

Overview of file transfer on Rackham. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

"},{"location":"software/rackham_file_transfer_using_transit_scp/","title":"Data transfer to/from Rackham using Transit using SCP","text":"

One can use SCP to copy files between Rackham and Transit, from either Rackham or Transit.

Both ways are shown step-by-step below.

  • Using SCP from Rackham
  • Using SCP from transit
","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/","title":"Data transfer to/from Rackham using Transit using SCP from Rackham","text":"

One can transfer files to/from Rackham using the UPPMAX Transit server, using SCP. The program scp allows you to copy files between Rackham and Transit.

The process is:

","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/#2-use-the-terminal-to-login-to-rackham","title":"2. Use the terminal to login to Rackham","text":"

Use a terminal to login to Rackham.

Forgot how to login to Rackham?

A step-by-step guide how to login to Rackham can be found here.

Spoiler: ssh [username]@rackham.uppmax.uu.se

","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/#3a-run-scp-to-copy-files-from-rackham-to-transit","title":"3a. Run scp to copy files from Rackham to Transit","text":"

This is how you copy a file from Rackham to Transit: in the terminal, run scp by doing:

scp [file_on_rackham] [username]@transit.uppmax.uu.se:\n

where [file_on_rackham] is the name of a file on Rackham and [username] is your UPPMAX username, for example:

scp my_rackham_file.txt sven@transit.uppmax.uu.se:\n

However, Transit is a service, not a file server. The scp command will complete successfully, yet the file will not be found on Transit.

","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/#3b-run-scp-to-copy-files-from-transit-to-rackham","title":"3b. Run scp to copy files from Transit to Rackham","text":"

In the terminal, run scp to copy files from Transit to Rackham by doing:

scp [username]@transit.uppmax.uu.se:[file_on_transit] [path_on_rackham]\n

where [file_on_transit] is the name of a file on Transit, [username] is your UPPMAX username, and [path_on_rackham] is the target path on Rackham, for example:

scp sven@transit.uppmax.uu.se:my_file.txt ~\n
","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_rackham/#4-if-asked-give-your-uppmax-password","title":"4. If asked, give your UPPMAX password","text":"

You can get rid of this prompt if you have set up SSH keys

","tags":["transfer","data transfer","file transfer","SCP","scp","Transit","transit","Rackham"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/","title":"Data transfer to/from Rackham using Transit using SCP from Transit","text":"

One can use SCP to copy files between Rackham and Transit, from either Rackham or Transit.

One can transfer files to/from Rackham using the UPPMAX Transit server, using SCP. The program scp allows you to copy files between Rackham and Transit.

The process is:

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/#2-use-the-terminal-to-login-to-transit","title":"2. Use the terminal to login to Transit","text":"

Use a terminal to login to Transit.

Forgot how to login to Transit?

A step-by-step guide how to login to Transit can be found here.

Spoiler: ssh [username]@transit.uppmax.uu.se

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/#3a-run-scp-to-copy-files-from-transit-to-rackham","title":"3a. Run scp to copy files from Transit to Rackham","text":"

In the terminal, run scp to copy files from Transit to Rackham by doing:

scp [file_on_transit] [username]@rackham.uppmax.uu.se:/home/[username]\n

where [file_on_transit] is the name of a file on Transit and [username] is your UPPMAX username, for example:

scp my_file.txt sven@rackham.uppmax.uu.se:/home/sven\n

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/#3b-run-scp-to-copy-files-from-rackham-to-transit","title":"3b. Run scp to copy files from Rackham to Transit","text":"

This is how you would copy a file from Rackham to Transit: in the terminal, run scp by doing:

scp [username]@rackham.uppmax.uu.se:/home/[username]/[file_on_rackham] [path_on_transit]\n

where [file_on_rackham] is the name of a file on Rackham, [username] is your UPPMAX username, and [path_on_transit] is the target path on Transit, for example:

scp sven@rackham.uppmax.uu.se:/home/sven/my_rackham_file.txt .\n

Where . means 'the directory where I am now on Transit'.

However, Transit is a service, not a file server: the copied file will remain on Transit only for as long as your session is active.

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_scp_from_transit/#4-if-asked-give-your-uppmax-password","title":"4. If asked, give your UPPMAX password","text":"

You can get rid of this prompt if you have set up SSH keys.

","tags":["transfer","data transfer","file transfer","Transit","transit","Rackham","SCP","scp"]},{"location":"software/rackham_file_transfer_using_transit_sftp/","title":"Data transfer to/from Rackham using Transit and SFTP","text":"

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

One can use SFTP to copy files between Rackham and Transit, from either Rackham or Transit.

Both ways are shown step-by-step below.

  • Using SFTP from Rackham
  • Using SFTP from Transit

Basic sftp commands can be found here.

"},{"location":"software/rackham_file_transfer_using_transit_sftp/#overview","title":"Overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n    classDef transit_node fill:#fff,color:#000,stroke:#fff\n\n    subgraph sub_inside[SUNET]\n      direction LR\n      user(User)\n      subgraph sub_transit_env[Transit]\n        transit_login(Transit login):::calculation_node\n        files_on_transit(Files posted to Transit):::transit_node\n      end\n      subgraph sub_rackham_shared_env[Rackham]\n          files_in_rackham_home(Files in Rackham home folder):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#ccc,color:#000,stroke:#000\n    style sub_transit_env fill:#cfc,color:#000,stroke:#000\n    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000\n\n    user --> |logs in |transit_login\n\n    transit_login --> |can use|files_on_transit\n    %% user_local_files <--> |graphical tool|files_in_rackham_home\n    %% user_local_files <--> |SCP|files_in_rackham_home\n    files_on_transit <==> |transfer|files_in_rackham_home

Overview of file transfer on Rackham. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/","title":"Data transfer to/from Rackham using Transit and SFTP from Rackham","text":"

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

One can transfer files to/from Rackham using the UPPMAX Transit server. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using Transit.

The process is:

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/#2-use-the-terminal-to-login-to-rackham","title":"2. Use the terminal to login to Rackham","text":"

Use a terminal to login to Rackham.

Forgot how to login to Rackham?

A step-by-step guide on how to login to Rackham can be found here.

Spoiler: ssh [username]@rackham.uppmax.uu.se

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/#3-run-sftp-to-connect-to-transit","title":"3. Run sftp to connect to Transit","text":"

In the terminal, run sftp to connect to Transit by doing:

sftp [username]@transit.uppmax.uu.se\n

where [username] is your UPPMAX username, for example:

sftp sven@transit.uppmax.uu.se\n
"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/#4-if-asked-give-your-uppmax-password","title":"4. If asked, give your UPPMAX password","text":"

You can get rid of this prompt if you have set up SSH keys.

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_rackham/#5-in-sftp-uploaddownload-files-tofrom-transit","title":"5. In sftp, upload/download files to/from Transit","text":"

Transit is a service, not a file server. This means that if you upload files to Transit using SFTP, they will remain there only as long as the connection is active. These files need to be forwarded to more permanent storage.
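A minimal sketch of such uploads and downloads at the sftp prompt, with hypothetical file names:

sftp> put my_rackham_file.txt\nsftp> get my_transit_file.txt\n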

Basic sftp commands can be found here.

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/","title":"Data transfer to/from Rackham using Transit and SFTP from Transit","text":"

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

One can transfer files to/from Rackham using the UPPMAX Transit server. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using Transit.

The process is:

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/#2-use-the-terminal-to-login-to-transit","title":"2. Use the terminal to login to Transit","text":"

Use a terminal to login to Transit.

Forgot how to login to Transit?

A step-by-step guide on how to login to Transit can be found here.

Spoiler: ssh [username]@transit.uppmax.uu.se

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/#3-run-sftp-to-connect-to-rackham","title":"3. Run sftp to connect to Rackham","text":"

In the terminal, run sftp to connect to Rackham by doing:

sftp [username]@rackham.uppmax.uu.se\n

where [username] is your UPPMAX username, for example:

sftp sven@rackham.uppmax.uu.se\n
"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/#4-if-asked-give-your-uppmax-password","title":"4. If asked, give your UPPMAX password","text":"

You can get rid of this prompt if you have set up SSH keys.

"},{"location":"software/rackham_file_transfer_using_transit_sftp_from_transit/#5-in-sftp-uploaddownload-files-tofrom-rackham","title":"5. In sftp, upload/download files to/from Rackham","text":"

Transit is a service, not a file server. This means that if you upload files to Transit using SFTP, they will remain there only as long as the connection is active. These files need to be forwarded to more permanent storage.

Basic sftp commands can be found here.

"},{"location":"software/rackham_file_transfer_using_winscp/","title":"File transfer to/from Rackham using WinSCP","text":"

There are multiple ways to transfer data to/from Rackham.

Here, we show how to transfer files using a graphical tool called WinSCP.

To transfer files to/from Rackham using WinSCP, do:

  • Start WinSCP
  • Create a new site
  • For that site, use all defaults, except:
    • Set file protocol to 'SFTP'
    • Set host name to rackham.uppmax.uu.se
    • Set user name to [username], e.g. sven
","tags":["transfer","data transfer","file transfer","Rackham","WinSCP"]},{"location":"software/rclone/","title":"Rclone","text":"

Rclone is a command-line program to manage files on cloud storage.

There is an Rclone module called rclone.

"},{"location":"software/rclone/#finding-an-rclone-version","title":"Finding an Rclone version","text":"
module spider rclone\n
What is the output?

Here is some example output:

---------------------------------------------------------------------------------------\n  rclone: rclone/1.56.2\n---------------------------------------------------------------------------------------\n\n    This module can be loaded directly: module load rclone/1.56.2\n\n    Help:\n      rclone - use rclone\n\n      Description\n\n      a command line program to manage files on cloud storage, supporting over 40 cloud sto\nrage products\n\n      Version 1.56.2\n\n      https://rclone.org\n\n      Run 'rclone config' to set up rclone for your own use.\n
"},{"location":"software/rclone/#loading-an-rclone-module","title":"Loading an Rclone module","text":"

Here the Rclone module for version 1.56.2 is loaded:

module load rclone/1.56.2\n
What is the output?

Here is some example output:

rclone/1.56.2 : run 'rclone config' to set up rclone for your own use.  'man rclone' is available for further documentation, and see https://rclone.org/ for more\n
"},{"location":"software/rclone/#finding-the-rclone-config-file","title":"Finding the Rclone config file","text":"

After having loaded an Rclone module, one can find the path to the Rclone config file by:

rclone config file\n
What is the output?

Here is some example output:

Configuration file doesn't exist, but rclone will use this path:\n/home/sven/.config/rclone/rclone.conf\n
"},{"location":"software/rclone/#using-the-rclone-web-interface","title":"Using the Rclone web interface","text":"

With SSH X forwarding enabled, one can use rclone from a web interface:

rclone rcd --rc-web-gui\n

Do not run this on the login node.

What is the output?

Here is some example output:

2024/04/02 08:31:59 ERROR : Error reading tag file at /home/sven/.cache/rclone/webgui/tag\n2024/04/02 08:31:59 NOTICE: A new release for gui (v2.0.5) is present at https://github.com/rclone/rclone-webui-react/releases/download/v2.0.5/currentbuild.zip\n2024/04/02 08:31:59 NOTICE: Downloading webgui binary. Please wait. [Size: 4763452, Path :  /home/sven/.cache/rclone/webgui/v2.0.5.zip]\n2024/04/02 08:32:00 NOTICE: Unzipping webgui binary\n2024/04/02 08:32:01 NOTICE: Serving Web GUI\n2024/04/02 08:32:01 NOTICE: Serving remote control on http://localhost:5572/\n
"},{"location":"software/rclone/#connect-to-swestore","title":"Connect to Swestore","text":"

Rclone is one of the recommended ways to connect to Swestore.

  • The Swestore documentation on Rclone
  • YouTube video: Connect to Swestore using Rclone
URL invalid?

When setting the URL to the correct https://webdav.swestore.se, Rclone will flag this as an error:

Rclone flags an error that may be a false error

However, this may be a false error. To determine whether it is, click on 'Explorer' and explore Swestore.

An example Swestore folder structure

If you see the Swestore folder structure above, Rclone works fine.
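Once the remote works, one can also verify it from the command line; a sketch, assuming the remote was named swestore during rclone config:

rclone ls swestore:\n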

"},{"location":"software/rclone/#links","title":"Links","text":"
  • The Rclone homepage
  • YouTube video: A Beginner's Guide To Rclone
  • YouTube video: Connect to Swestore using Rclone
"},{"location":"software/rstudio/","title":"RStudio","text":"

RStudio is an IDE specialized for the R programming language.

What is an IDE?

See the page on IDEs.

Using RStudio differs per UPPMAX cluster:

  • RStudio on Bianca
  • RStudio on Rackham
","tags":["RStudio"]},{"location":"software/rstudio/#rstudio-versions","title":"RStudio versions","text":"Which versions of RStudio are available?

Use module spider Rstudio to see all versions:

[sven@r210 sven]$ module spider Rstudio\n\n----------------------------------------------------------------------------\n  RStudio:\n----------------------------------------------------------------------------\n     Versions:\n        RStudio/1.0.136\n        RStudio/1.0.143\n        RStudio/1.0.153\n        RStudio/1.1.423\n        RStudio/1.1.463\n        RStudio/1.4.1106\n        RStudio/2022.02.0-443\n        RStudio/2022.02.3-492\n        RStudio/2022.07.1-554\n        RStudio/2023.06.0-421\n        RStudio/2023.06.2-561\n        RStudio/2023.12.1-402 (may not always work)\n

Some links between version and official documentation:

RStudio module: RStudio/2023.06.2-561; RStudio Builds documentation: here","tags":["RStudio"]},{"location":"software/rstudio/#troubleshooting","title":"Troubleshooting","text":"","tags":["RStudio"]},{"location":"software/rstudio/#rstudio-runs-partially","title":"RStudio runs partially","text":"

RStudio runs partially:

  • File content is displayed just fine
  • The R interpreter does not respond
  • The files pane at the bottom-right is loading forever

In one case (see ticket for details), the problem was caused by a process called -bash (yes, the first character is a dash/minus). Killing it with kill -s 1 [PID] (for example, kill -s 1 11723) and then restarting RStudio solved the problem.
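A sketch of how one could look up the PID of such a process with standard ps options (note that the grep command itself may also show up in the output):

ps -u $USER -o pid,args | grep -- '-bash'\n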

","tags":["RStudio"]},{"location":"software/rstudio/#r-encountered-a-fatal-error","title":"R encountered a fatal error","text":"

Full error message:

R encountered a fatal error. The session was terminated.\n

This is because the home folder is full.

Check this by using uquota.

What does that look like?

Your output will be similar to this:

[sven@rackham3 ~]$ uquota\nYour project     Your File Area       Unit        Usage  Quota Limit  Over Quota\n---------------  -------------------  -------  --------  -----------  ----------\nhome             /home/sven           GiB          24.7           32\nhome             /home/sven           files       79180       300000\nnaiss2024-22-49  /proj/worldpeace     GiB           5.1          128\nnaiss2024-22-49  /proj/worldpeace     files       20276       100000\n

Candidates for files that are too big are these hidden files (a sketch for checking their sizes follows the list):

  • .RData
  • .Renviron
  • .Rhistory
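To check how much space these files take, one could do (a sketch; adjust the list to the files that exist in your home folder):

du -h .RData .Renviron .Rhistory\n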

One can use ls --all to see all files, including hidden files:

ls --all\n
What does that look like?

Your output will be similar to this:

[sven@rackham2 ~]$ ls --all\n.                      .gtkrc               .nextflow.log.8\n..                     .ICEauthority        .nextflow.log.9\n.allinea               .ipython             .nv\n.bash_history          .java                .oracle_jre_usage\n.bash_logout           .jupyter             .pki\n.bash_profile          .kde                 private\n.bashrc                .keras               .profile\n.bashrc.save           .lesshst             .python_history\n.beast                 lib                  .r\nbin                    .lmod.d              R\n.cache                 .local               .RData\n.conda                 .login               .Rhistory\n.config                .MathWorks           .rstudio-desktop\n.cshrc                 .matlab              .ssh\n.dbus                  .mozilla             .subversion\nDNABERT_2              my_little_turtle.py  ticket_297538\n.emacs                 .nextflow            users\n.esd_auth              .nextflow.log        .viminfo\n.gitconfig             .nextflow.log.1      .vscode-oss\n.git-credential-cache  .nextflow.log.2      .vscode-server\nglob                   .nextflow.log.3      .wget-hsts\n.gnupg                 .nextflow.log.4      .Xauthority\n.gracetimefile         .nextflow.log.5      .xfce4-session.verbose-log\n.gradle                .nextflow.log.6      .xfce4-session.verbose-log.last\n.gstreamer-0.10        .nextflow.log.7      .zshrc\n

You can delete these hidden files by:

rm .RData\nrm .Renviron\nrm .Rhistory\n
For staff

Full report can be found at RT ticket 298623

","tags":["RStudio"]},{"location":"software/rstudio_on_bianca/","title":"RStudio on Bianca","text":"","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#introduction","title":"Introduction","text":"

RStudio is an IDE specialized for the R programming language.

What is an IDE?

See the page on IDEs.

In this session, we show how to use RStudio on Bianca, using Bianca's remote desktop environment.

Forgot how to login to a remote desktop environment?

See the 'Logging in to Bianca' page.

Spoiler: go to https://bianca.uppmax.uu.se/

As RStudio is a resource-heavy program, it must be run on an interactive node.

Forgot how to start an interactive node?

See the 'Starting an interactive node' page.

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#procedure-to-start-rstudio","title":"Procedure to start RStudio","text":"

Below is a step-by-step procedure to start RStudio on Bianca.

Prefer a video?

This procedure is also demonstrated in this YouTube video.

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#1-get-within-sunet","title":"1. Get within SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#2-start-the-bianca-remote-desktop-environment","title":"2. Start the Bianca remote desktop environment","text":"Forgot how to start Bianca's remote desktop environment?

See the 'Logging in to Bianca' page.

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#3-start-an-interactive-session","title":"3. Start an interactive session","text":"

Within the Bianca remote desktop environment, start a terminal. Within that terminal, start an interactive node with 2 cores:

Why two cores?

RStudio is a resource-heavy program. Due to this, we recommend using at least two cores for a more pleasant user experience.

interactive -A [project_number] -n 2 -t 8:00:00\n

Where [project_number] is your UPPMAX project, for example:

interactive -A sens2016001 -n 2 -t 8:00:00\n
What is my UPPMAX project number?

An easy answer that is probably true:

The one you used to login, which is part of your prompt. For example, in the prompt below, the project is sens2016001.

[sven@sens2016001-bianca sven]$\n

Do not start RStudio from the menus

You can start a version of RStudio from the menus. However, this will not have access to loaded modules.

Instead, load RStudio from the module system.

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#4-load-the-modules-needed","title":"4. Load the modules needed","text":"

In the terminal of the interactive session, do:

module load R_packages/4.3.1 RStudio/2023.12.1-402\n
Do all combinations of R_packages and RStudio work?

No.

Not all combinations of R_packages and RStudio work equally well, but this one is known to work (as it was used in this solved ticket).

There have been issues using RStudio/2023.06.2-561 together with R/4.3.1.

Shouldn't I load R first?

No.

Loading R_packages will load the corresponding R module.

What happens if I do not load R_packages?

Then you will have RStudio running without any R packages installed.

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#5-start-rstudio","title":"5. Start RStudio","text":"

With the modules loaded, start RStudio from the terminal (on the interactive node):

rstudio\n

RStudio can be slow to start up, as R has thousands (!) of packages. Additionally, at startup and if enabled, your saved RStudio workspace (with potentially a lot of data!) is read.

How does RStudio look on Bianca?

RStudio when starting up:

RStudio when started up:

RStudio in action:

The RStudio debugger, at the error message level:

The RStudio debugger, at the function-that-caused-the-error level:

The RStudio debugger, at the program level:

","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#troubleshooting","title":"Troubleshooting","text":"","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#rstudio-freezes-when-i-start-it-where-yesterday-it-still-worked","title":"RStudio freezes when I start it, where yesterday it still worked","text":"","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_bianca/#hypothesis-your-home-folder-is-full","title":"Hypothesis: Your home folder is full","text":"

Your home folder is full. That explains why it still worked yesterday: on that day, your home folder was not full yet.

RStudio uses your home folder to store the things it needs, so when it is full, RStudio cannot function properly.

To confirm, from a terminal do:

du -h -d 1 .\n

This will show how much space the folders in your home folder take:

In this example, there is a folder called wharf_backup that is 4.5 gigabytes. Moving it to a project folder solved the problem:

mv wharf_backup/ /proj/nobackup/[your_project_folder]\n

For example:

mv wharf_backup/ /proj/nobackup/sens2016001\n
","tags":["RStudio","Bianca"]},{"location":"software/rstudio_on_rackham/","title":"RStudio on Rackham","text":"","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#introduction","title":"Introduction","text":"

RStudio is an IDE specialized for the R programming language.

What is an IDE?

See the page on IDEs.

In this session, we show how to use RStudio on Rackham, using Rackham's remote desktop environment.

Forgot how to login to a remote desktop environment?

See the 'Logging in to Rackham' page.

Spoiler: go to https://rackham.uppmax.uu.se/

As RStudio is a resource-heavy program, it must be run on an interactive node.

Forgot how to start an interactive node?

See the 'Starting an interactive node' page.

","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#procedure-to-start-rstudio","title":"Procedure to start RStudio","text":"

Below is a step-by-step procedure to start RStudio on Rackham.

Prefer a video?

This procedure is also demonstrated in this YouTube video.

","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#1-get-within-sunet","title":"1. Get within SUNET","text":"

This step is only needed when outside of Sweden.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#2-start-a-rackham-remote-desktop-environment","title":"2. Start a Rackham remote desktop environment","text":"

This can be either:

  • Login to the Rackham remote desktop environment using the website
  • Login to the Rackham remote desktop environment using a local ThinLinc client
","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#3-start-an-interactive-session","title":"3. Start an interactive session","text":"

Within the Rackham remote desktop environment, start a terminal. Within that terminal, start an interactive session with 2 cores:

interactive -A [naiss_project_id] -n 2 -t [duration]\n

Where:

  • [naiss_project_id] is your UPPMAX project code
  • [duration] is the duration of the interactive session

Resulting in, for example:

interactive -A naiss2024-22-310 -n 2 -t 8:00:00\n

Why two cores?

RStudio is a resource-heavy program. Due to this, we recommend using at least two cores for a more pleasant user experience.

What is an interactive node?

See start an interactive session

Do not start RStudio from the menus

You can start a version of RStudio from the menus. However, this will not have access to loaded modules.

Instead, load RStudio from the module system.

","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#4-load-the-modules-needed","title":"4. Load the modules needed","text":"

In the terminal of the interactive session, do:

module load R/4.3.1 R_packages/4.3.1 RStudio/2023.12.1-402\n
What does that look like?

Your output will be similar to:

[sven@r210 sven]$ module load R/4.3.1 R_packages/4.3.1 RStudio/2023.06.2-561\nR/4.3.1: Nearly all CRAN and BioConductor packages are installed and available by loading\nthe module R_packages/4.3.1 \nR_packages/4.3.1: Note that loading some spatial analysis packages, especially geo-related packages, might\nR_packages/4.3.1: require you to load additional modules prior to use. monocle3 is such a package. See\nR_packages/4.3.1: 'module help R_packages/4.3.1'\n\nR_packages/4.3.1: The RStudio packages pane is disabled when loading this module, due to RStudio slowdowns\nR_packages/4.3.1: because there are >20000 available packages. *All packages are still available.*  For \nR_packages/4.3.1: more information and instructions to re-enable the packages pane (not recommended) see\nR_packages/4.3.1: 'module help R_packages/4.3.1'\n\nRStudio/2023.12.1-402: Sandboxing is not enabled for RStudio at UPPMAX. See 'module help RStudio/2023.12.1-402' for more information\n
What happens if I do not load R or R_packages?

Then you will have the system-wide R version 3.6.0 without any packages installed.

What does 'Sandboxing is not enabled for RStudio at UPPMAX' mean?

Nothing.

Here is how it looks:

[sven@r482 sven]$ module load RStudio/2023.06.2-561\nRStudio/2023.06.2-561: Sandboxing is not enabled for RStudio at UPPMAX. See 'module help RStudio/2023.06.2-561' for more information\n[sven@r482 sven]$ module help RStudio/2023.06.2-561\n\n--------------------------------------------------- Module Specific Help for \"RStudio/2023.06.2-561\" ----------------------------------------------------\n RStudio - use RStudio 2023.06.2-561\n\n Version 2023.06.2-561\n\nWith the Linux distribution used on most UPPMAX clusters (CentOS 7), RStudio/2023.06.2-561\nprefers to use a 'suid sandbox'. We do not enable this at UPPMAX. Instead, we disable sandboxing\nduring startup of RStudio by defining a shell alias for the 'rstudio' command. You may notice\nadditional errors in the terminal window from which you ran the 'rstudio' command. This is\nexpected and does not affect RStudio operation.\n\nFor performance reasons, UPPMAX disables checks for updates.\n\nUPPMAX also disables the 'Packages' pane of RStudio if an R_packages module is loaded.\n
","tags":["RStudio","Rackham"]},{"location":"software/rstudio_on_rackham/#5-start-rstudio","title":"5. Start RStudio","text":"

With the modules loaded, start RStudio from the terminal (on the interactive node):

rstudio\n

RStudio can be slow to start up, as R has thousands (!) of packages. Additionally, at startup and if enabled, your saved RStudio workspace (with potentially a lot of data!) is read.

How does RStudio look on Rackham?

RStudio when starting up:

RStudio when started up:

RStudio when ready:

RStudio in action:

The RStudio debugger, at the error message level:

The RStudio debugger, at the function-that-caused-the-error level:

The RStudio debugger, at the program level:

","tags":["RStudio","Rackham"]},{"location":"software/rsync/","title":"rsync","text":"

rsync is a command-line tool for file transfer, with the goal of ensuring integrity of the data, as well as a minimal amount of data transfer.

rsync can be used for copying, but also for synchronizing files, which is ideal for making a backup. On this page, we use the word 'copy', although rsync by default does a one-way synchronization: if the data is already there, it will do nothing.
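To preview what rsync would transfer without copying anything, one can do a dry run; a sketch, with a hypothetical folder name:

rsync --recursive --dry-run --verbose my_folder sven@rackham.uppmax.uu.se:/home/sven/\n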

  • Using rsync on Bianca
  • Using rsync on Rackham
"},{"location":"software/rsync/#installing-rsync","title":"Installing rsync","text":"

To install rsync, see the official rsync download page.

Tip for Ubuntu users

Use apt like usual:

sudo apt install rsync\n
Tip for Windows users

When looking to download an executable of rsync, look for the words 'binary' (all executables are binary) and Cygwin (the environment in which the rsync executable was built on Windows).

"},{"location":"software/rsync/#copy-a-folder-from-local-to-rackham","title":"Copy a folder from local to Rackham","text":"

Copy a folder from a local computer to a Rackham home folder.

On your local computer, do:

rsync --recursive [folder_name] [user_name]@rackham.uppmax.uu.se:/home/[user_name]/\n

For example:

rsync --recursive my_folder sven@rackham.uppmax.uu.se:/home/sven/\n

The --recursive flag is used to copy a folder and all of its subfolders.

Want to preserve timestamps?

To preserve the files' timestamps, use the --archive flag (which also implies --recursive), e.g.

rsync --recursive --archive my_folder sven@rackham.uppmax.uu.se:/home/sven/\n
"},{"location":"software/rsync/#copy-a-folder-from-rackham-to-local","title":"Copy a folder from Rackham to local","text":"

Copy a folder from Rackham to your local computer.

On your local computer, do:

rsync --recursive [user_name]@rackham.uppmax.uu.se:/home/[user_name]/[folder_name] [local_folder_destination]\n

For example:

rsync --recursive sven@rackham.uppmax.uu.se:/home/sven/my_folder .\n

Where . means 'the folder where I am now'.

Want to preserve timestamps?

To preserve the files' timestamps, use the --archive flag (which also implies --recursive), e.g.

rsync --recursive --archive sven@rackham.uppmax.uu.se:/home/sven/my_folder .\n
"},{"location":"software/rsync_on_bianca/","title":"rsync on Bianca","text":"

rsync is a command-line tool for file transfer.

This page describes how to use rsync on Bianca.

Using rsync for direct file transfer from a local computer to wharf fails, as one cannot rsync directly to wharf.

It can be made to work (by using transit), as described in the UPPMAX Bianca file transfer using rsync.

What does it look like if I try to rsync directly to wharf anyway?

One cannot rsync directly to wharf.

However, this is how it looks:

sven@sven-N141CU:~$ rsync my_local_file.txt sven-sens2016001@bianca-sftp.uppmax.uu.se:/sven-sens2016001\n\nHi!\n\nYou are connected to the bianca wharf (sftp service) at\nbianca-sftp.uppmax.uu.se.\n\nNote that we only support SFTP, which is not exactly the\nsame as SSH (rsync and scp will not work).\n\nPlease see our homepage and the Bianca User Guide\nfor more information:\n\nhttps://www.uppmax.uu.se/support/user-guides/bianca-user-guide/\n\nIf you have any questions not covered by the User Guide, you are\nwelcome to contact us at support@uppmax.uu.se.\n\nBest regards,\nUPPMAX\n\nsven-sens2016001@bianca-sftp.uppmax.uu.se's password:\nprotocol version mismatch -- is your shell clean?\n(see the rsync manpage for an explanation)\nrsync error: protocol incompatibility (code 2) at compat.c(622) [sender=3.2.7]\n

If you want to do file transfer to/from Bianca, read the UPPMAX page on Bianca file transfer using rsync.

"},{"location":"software/rsync_on_bianca/#links","title":"Links","text":"
  • rsync homepage
"},{"location":"software/rsync_on_rackham/","title":"rsync on Rackham","text":"

rsync is a command-line tool for file transfer.

This page describes how to use rsync on Rackham.

"},{"location":"software/rsync_on_rackham/#copy-a-folder-from-local-to-rackham","title":"Copy a folder from local to Rackham","text":"
flowchart LR\n  local_computer[Your local computer. Run rsync from here]\n  rackham[Rackham]\n\n  local_computer --> |rsync| rackham

Copy a folder from a local computer to a Rackham home folder.

On your local computer, do:

rsync --recursive [folder_name] [user_name]@rackham.uppmax.uu.se:/home/[user_name]/\n

For example:

rsync --recursive my_folder sven@rackham.uppmax.uu.se:/home/sven/\n

The --recursive flag is used to copy a folder and all of its subfolders.

"},{"location":"software/rsync_on_rackham/#copy-a-folder-from-rackham-to-local","title":"Copy a folder from Rackham to local","text":"
flowchart LR\n  local_computer[Your local computer. Run rsync from here]\n  rackham[Rackham]\n\n  rackham --> |rsync| local_computer

Copy a folder from Rackham to your local computer.

On your local computer, do:

rsync --recursive [user_name]@rackham.uppmax.uu.se:/home/[user_name]/[folder_name] [local_folder_destination]\n

For example:

rsync --recursive sven@rackham.uppmax.uu.se:/home/sven/my_folder .\n

Where . means 'the folder where I am now'.

"},{"location":"software/rsync_on_rackham/#links","title":"Links","text":"
  • rsync homepage
"},{"location":"software/sbatch/","title":"sbatch","text":"

The job scheduler consists of many programs to manage jobs. sbatch is the program to submit a job to the scheduler.

flowchart TD\n  sbatch[sbatch: submit a job]\n  scancel[scancel: cancel a running job]\n  squeue[squeue: view the job queue]\n  sbatch --> |Oops| scancel\n  sbatch --> |Verify| squeue

After submitting a job, one can use squeue to verify the job is in the job queue. If there is an error in the sbatch command, one can cancel a job using scancel.
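A minimal sketch of this workflow, with hypothetical project code, script name, and job number:

sbatch -A naiss2024-22-310 my_script.sh\nsqueue -u [username]\nscancel [job_number]\n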

","tags":["sbatch"]},{"location":"software/sbatch/#minimal-examples","title":"Minimal examples","text":"

There are two ways to demonstrate minimal use of sbatch:

  • with command-line Slurm arguments: easier to experiment with
  • with Slurm parameters in the script: easier when you know what you need

These minimal examples use a short, default run-time.

","tags":["sbatch"]},{"location":"software/sbatch/#with-command-line-slurm-arguments","title":"with command-line Slurm arguments","text":"

To let Slurm schedule a job, one uses sbatch.

For Bianca and Rackham, one uses sbatch like this:

sbatch -A [project_code] [script_filename]\n

For Snowy, one uses sbatch like this:

sbatch -M snowy -A [project_code] [script_filename]\n

Where:

  • -A [project_code]: the project to use, for example sens2017625
  • [script_filename]: the name of a file that is a bash script, for example, my_script.sh
  • -M snowy: if you use the Snowy computational resources

Filling this all in, for Bianca and Rackham:

sbatch -A sens2017625 my_script.sh\n

Filling this all in, for Snowy:

sbatch -M snowy -A sens2017625 my_script.sh\n
What is my project?

See the UPPMAX documentation on projects.

How do I convert my project name to the project code I need to use here?

See the UPPMAX documentation on projects.

What is in the script file?

The script file my_script.sh is a minimal example script. Such a minimal example script could be:

#!/bin/bash\necho \"Hello\"\n
","tags":["sbatch"]},{"location":"software/sbatch/#with-slurm-parameters-in-the-script","title":"with Slurm parameters in the script","text":"

The minimal command to use sbatch with Slurm parameters in the script:

sbatch [script_filename]\n

where [script_filename] is the name of a bash script, for example:

sbatch my_script.sh\n

For Bianca and Rackham, the script must contain at least the following line:

#SBATCH -A [project_code]\n

For Snowy, the script must contain at least the following lines:

#SBATCH -A [project_code]\n#SBATCH -M snowy\n

With:

  • [project_code]: the project code, for example uppmax2023-2-25
What is in the script file, for Bianca and Rackham?

A full example script would be:

#!/bin/bash\n#SBATCH -A uppmax2023-2-25\necho \"Hello\"\n
What is in the script file, for Snowy?

A full example script would be:

#!/bin/bash\n#SBATCH -A uppmax2023-2-25\n#SBATCH -M snowy\necho \"Hello\"\n
","tags":["sbatch"]},{"location":"software/sbatch/#more-parameters","title":"More parameters","text":"

See the Slurm documentation on sbatch

","tags":["sbatch"]},{"location":"software/sbatch/#troubleshooting","title":"Troubleshooting","text":"

See Slurm troubleshooting

","tags":["sbatch"]},{"location":"software/scancel/","title":"scancel","text":"

The job scheduler consists of many programs to manage jobs. scancel is a tool to cancel jobs that are in the job queue or are running.

Usage:

scancel [job_number]\n

Where the [job_number] is the number of the job. You can see the job number when submitting a job using sbatch and you can find it in the job queue (when doing squeue).

For example:

[sven@rackham3 ~]$ sbatch -A my_project my_script.sh \nSubmitted batch job 49311056\n[sven@rackham3 ~]$ scancel 49311056\n[sven@rackham3 ~]$ \n
","tags":["scancel"]},{"location":"software/screen/","title":"Running a detachable screen process in a job","text":"

When you run the interactive command, you get a command prompt in the screen program.

Warning

When running the screen program in other environments, you can detach from your screen and later reattach to it. Within the environment of the interactive command, you lose this ability: Your job is terminated when you detach. (This is a design decision and not a bug.)

In case you want the best of both worlds, i.e. to be able to detach and reattach to your screen program within a job, you need to start a job in some other way and start your screen session from a separate ssh login. Here is an example of how you can do this:

$ salloc -A project_ID -t 15:00  -n 1 --qos=short --bell --no-shell\nsalloc: Pending job allocation 46964140\nsalloc: job 46964140 queued and waiting for resources\nsalloc: job 46964140 has been allocated resources\nsalloc: Granted job allocation 46964140\nsalloc: Waiting for resource configuration\nsalloc: Nodes r174 are ready for job\n

Check the queue manager for the allocated node. In the example below, one core was allocated on the r174 compute node.

$ squeue -j 46964140\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n          46964140      core no-shell     user  R       0:44      1 r174\n

You can start an xterm terminal on the allocated node like this:

xterm -e ssh -AX r174 &\n

The salloc command gives you a job allocation of one core for 15 minutes (the \"--no-shell\" option is important here). Alternatively, you can log in to any node of any of your running jobs, started with e.g. the sbatch command.

You get a job number and from that you can find out the node name, in this example r174.

When you log in to the node with the ssh command, start the screen program:

screen\n

When you detach from the screen program, e.g. with the \"Ctrl-a d\" key sequence, you can later in the same ssh session or in another ssh session reattach to your screen session:

screen -r\n

When your job has terminated, you can neither reattach to your screen session nor log in to the node.

The screen session of the interactive command is integrated into your job, so e.g. all environment variables for the job are correctly assigned. For a separate ssh session, as in this example, that is not the case.

Please note that it is the job allocation that determines your core hour usage and not your ssh or screen sessions.

"},{"location":"software/screen/#tips","title":"Tips","text":"
  • Start a new screen session with a command:

    screen -dm your_command\n

    This will start a new screen session, run the command, and then detach from the session.

  • If you want to run multiple commands, you can do so like this:

    screen -dm bash -c \"command1; command2\"\n

    This will run command1 and command2 in order.

  • To reattach to the screen session, use:

    screen -r\n

    If you have multiple sessions, you'll need to specify the session ID.

  • To list your current screen sessions, use:

    screen -ls\n

Please note that when a program terminates, screen (by default) kills the window that contained it. If you don't want your session to get killed after the script is finished, add exec sh at the end. For example:

screen -dm bash -c 'your_command; exec sh'\n

This will keep the screen session alive after your_command has finished executing.

YouTube : How to use GNU SCREEN - the Terminal Multiplexer

"},{"location":"software/sftp/","title":"sftp","text":"

sftp is a tool to transfer data.

","tags":["sftp","SFTP"]},{"location":"software/sftp/#1-getting-help","title":"1. Getting Help","text":"

Once you are in the sftp prompt, check the available commands by typing ? or help at the command prompt. This will print out a list of the available commands and give a short description of them. We'll cover the most common ones in this guide.

sftp> ?\nAvailable commands:\ncd path                       Change remote directory to 'path'\n...\n...\n...\n
","tags":["sftp","SFTP"]},{"location":"software/sftp/#2-check-present-working-directory","title":"2. Check Present Working Directory","text":"

The command lpwd is used to check the local present working directory, whereas the pwd command is used to check the remote working directory.

sftp> lpwd\nLocal working directory: /\nsftp> pwd\nRemote working directory: /tecmint/\nlpwd \u2013 print the current directory on your system\npwd \u2013 print the current directory on the ftp server\n
","tags":["sftp","SFTP"]},{"location":"software/sftp/#3-listing-files","title":"3. Listing Files","text":"

List files and directories on the local as well as the remote system.

On Remote

sftp> ls\n

On Local

sftp> lls\n
","tags":["sftp","SFTP"]},{"location":"software/sftp/#4-upload-file","title":"4. Upload File","text":"

Upload single or multiple files to the remote system.

sftp> put local.profile\n

Uploading local.profile to /tecmint/local.profile

","tags":["sftp","SFTP"]},{"location":"software/sftp/#5-upload-multiple-files","title":"5. Upload Multiple Files","text":"

Upload multiple files to the remote system.

sftp> mput *.xls\n

An alternative when uploading many files is to tar and/or compress the files into a single file before uploading. The file transfer stops in between every file, so the more files you have to upload, the more stops it will make. This can have a dramatic impact on transfer speed if there are thousands of files to transfer. Running tar and/or zip on the files before transferring them packages everything into a single file, so there will be no stops at all during the transfer.
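A sketch of packaging a folder into one compressed file before the transfer, with a hypothetical folder name:

tar -czf my_folder.tar.gz my_folder/\n

A single put my_folder.tar.gz then uploads everything in one go.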

","tags":["sftp","SFTP"]},{"location":"software/sftp/#6-download-files","title":"6. Download Files","text":"

Download single or multiple files to the local system.

sftp> get SettlementReport_1-10th.xls\n

Fetching /tecmint/SettlementReport_1-10th.xls to SettlementReport_1-10th.xls. To get multiple files on the local system:

sftp> mget *.xls\n

Note: As we can see, by default the get command downloads the file to the local system with the same name. We can download a remote file and store it under a different name by specifying the name at the end (this applies only when downloading a single file).
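A sketch of such a renaming download, reusing the file from the example above:

sftp> get SettlementReport_1-10th.xls report.xls\n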

","tags":["sftp","SFTP"]},{"location":"software/sftp/#7-switching-directories","title":"7. Switching Directories","text":"

Switch from one directory to another in local and remote locations.

On Remote

sftp> cd test\n

On Local

sftp> lcd Documents\n
","tags":["sftp","SFTP"]},{"location":"software/sftp/#8-create-directories","title":"8. Create Directories","text":"

Create new directories in remote and local locations.

sftp> mkdir test\nsftp> lmkdir Documents\n
","tags":["sftp","SFTP"]},{"location":"software/sftp/#9-remove-directory-or-file","title":"9. Remove Directory or File","text":"

Remove a directory or file on the remote system.

sftp> rm Report.xls\nsftp> rmdir sub1\n

Note: To remove/delete any directory from a remote location, the directory must be empty.

","tags":["sftp","SFTP"]},{"location":"software/sftp/#10-exit-sftp-shell","title":"10. Exit sFTP Shell","text":"

The ! (exclamation mark) command drops us into a local shell, from where we can execute Linux commands. Type the exit command to return to the sftp> prompt.

sftp> !\n[root@sftp ~]# exit\nShell exited with status 1\nsftp>\n
","tags":["sftp","SFTP"]},{"location":"software/sinfo/","title":"sinfo","text":"

sinfo is a tool to view information about Slurm nodes and partitions.

What does that look like on Bianca?
[sven@sens2016001-bianca ~]$ sinfo\nPARTITION AVAIL  TIMELIMIT  NODES  STATE NODELIST\nall        down 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]\nall        down 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]\nall        down 10-00:00:0      1   idle sens2016001-b9\nnode         up 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]\nnode         up 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]\nnode         up 10-00:00:0      1   idle sens2016001-b9\ncore*        up 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]\ncore*        up 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]\ncore*        up 10-00:00:0      1   idle sens2016001-b9\ndevel        up    1:00:00    192 drain* sens2016001-b[10-200,1178]\ndevel        up    1:00:00     71   unk* sens2016001-b[1073-1084,1119-1177]\ndevel        up    1:00:00      1   idle sens2016001-b9\ndevcore      up    1:00:00    192 drain* sens2016001-b[10-200,1178]\ndevcore      up    1:00:00     71   unk* sens2016001-b[1073-1084,1119-1177]\ndevcore      up    1:00:00      1   idle sens2016001-b9\n

Although it may seem unexpected that only 1 node is idle, this is the expected behavior from a virtual cluster: most physical nodes are not allocated to this project and hence unavailable.
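sinfo also accepts filters; for example, one can limit the output to a single partition with the -p flag (a sketch):

sinfo -p devel\n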

What does that look like on Rackham?
[sven@rackham3 ~]$ sinfo\nPARTITION AVAIL  TIMELIMIT  NODES  STATE NODELIST\nall        down 10-00:00:0     22   comp r[2,36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]\nall        down 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]\nall        down 10-00:00:0     72 drain$ r[1001-1072]\nall        down 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]\nall        down 10-00:00:0     45  down* r[13,23,57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479,483-484,1189-1190,1199,1212,1240]\nall        down 10-00:00:0      8  drain r[29,35,78,154,212,226,335,485]\nall        down 10-00:00:0    115    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482,486,1180-1181,1203,1208,1210-1211,1217,1223,1227,1231,1235,1237,1239,1242-1246]\nall        down 10-00:00:0    317  alloc r[1,3,6,9,19,25-28,30,32-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,336-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478,1179,1182-1188,1191-1198,1200-1202,1204-1207,1209,1213-1216,1218-1222,1224-1226,1228-1230,1232-1234,1236,1238,1241,1247-1250]\nall        down 10-00:00:0     13   idle r[8,10-12,14-18,20-22,24]\nall        down 10-00:00:0     10   down r[4-5,7,31,61,118,144-145,219,278]\ncore*        up 10-00:00:0     21   comp r[36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]\ncore*        up 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]\ncore*        up 10-00:00:0     72 drain$ r[1001-1072]\ncore*        up 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]\ncore*        up 10-00:00:0     41  down* r[57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479,1189-1190,1199,1212,1240]\ncore*        up 10-00:00:0      5  drain r[35,78,154,212,226]\ncore*        up 10-00:00:0    114    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482,1180-1181,1203,1208,1210-1211,1217,1223,1227,1231,1235,1237,1239,1242-1246]\ncore*        up 10-00:00:0    301  alloc 
r[33-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,340,342-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478,1179,1182-1188,1191-1198,1200-1202,1204-1207,1209,1213-1216,1218-1222,1224-1226,1228-1230,1232-1234,1236,1238,1241,1247-1250]\ncore*        up 10-00:00:0      6   down r[61,118,144-145,219,278]\nnode         up 10-00:00:0     22   comp r[2,36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]\nnode         up 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]\nnode         up 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]\nnode         up 10-00:00:0     38  down* r[13,23,57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479]\nnode         up 10-00:00:0      7  drain r[29,35,78,154,212,226,335]\nnode         up 10-00:00:0     96    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482]\nnode         up 10-00:00:0    268  alloc r[1,3,6,9,19,25-28,30,32-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,336-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478]\nnode         up 10-00:00:0     13   idle r[8,10-12,14-18,20-22,24]\nnode         up 10-00:00:0     10   down r[4-5,7,31,61,118,144-145,219,278]\ndevel        up    1:00:00      2  down* r[483-484]\ndevel        up    1:00:00      1  drain r485\ndevel        up    1:00:00      1    mix r486\ndevcore      up    1:00:00      2  down* r[483-484]\ndevcore      up    1:00:00      1  drain r485\ndevcore      up    1:00:00      1    mix r486\n
"},{"location":"software/singularity/","title":"Singularity User Guide","text":"

Singularity (www.sylabs.io/docs) provides tools for running containers that are more suitable for traditional HPC environments than some other tools such as Docker or lxc. These containers are portable and can be run both on your desktop machine and on our clusters.

One of the ways in which Singularity is more suitable for HPC is that it very actively restricts permissions so that you do not gain access to additional resources while inside the container. One consequence of this is that some common tools like ping or sudo do not work when run within a container (as a regular user).

Singularity is installed and usable to run custom container images on the clusters bianca and rackham.

"},{"location":"software/singularity/#pulling-an-existing-singularity-image","title":"Pulling an existing Singularity image","text":"

It's possible to download and run pre-built images from the Singularity hub (https://singularity-hub.org) and the Singularity library (https://cloud.sylabs.io) using the singularity pull subcommand, such as:

singularity pull library://ubuntu\n

This will download the requested image and place it in the current directory. You can also upload an image of your own and run it directly.

"},{"location":"software/singularity/#creating-a-singularity-container","title":"Creating a Singularity container","text":"

See creating a Singularity container for the multiple ways how to build a Singularity container.

"},{"location":"software/singularity/#examples","title":"Examples","text":"
  • Create a Singularity container from conda
  • Create a Singularity container for an R package
  • Create a Singularity container from Docker Hub
  • Create a Singularity container from a docker pull
"},{"location":"software/singularity/#running-an-existing-image","title":"Running an existing image","text":"

Once you have an image, you can \"run\" it with a command such as

singularity run singularityhub-ubuntu-14.04.img\n

which will try to execute a \"run\" target in the container. There are also the shell and exec subcommands for starting a shell and running a specific command respectively.
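A sketch of both subcommands, reusing the image from the example above:

singularity shell singularityhub-ubuntu-14.04.img\nsingularity exec singularityhub-ubuntu-14.04.img cat /etc/os-release\n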

"},{"location":"software/singularity/#access-to-uppmax-file-systems","title":"Access to UPPMAX file systems","text":"

By default, singularity will try to help and map the UPPMAX file systems from the current cluster so that they can be accessed from within the container. For CentOS7 based clusters (snowy, rackham, bianca), this works as expected.

Singularity is installed on the system (on each separate node) and does not require any module load to be available.

It's possible to run Docker containers. You can try to run

singularity shell docker://debian:stretch\n

but note that Docker containers are typically designed to run with more privileges than are allowed with Singularity, so it's quite possible things do not work as expected.

"},{"location":"software/singularity/#not-all-images-may-work-everywhere","title":"Not all images may work everywhere","text":"

Images run with the same Linux kernel as the rest of the system. For HPC systems, the kernels used tend to be quite old for stability reasons. This is not normally a problem, but it can cause issues if the libraries of the images you try to run expect functionality added in newer kernels. How and what works is difficult to know without trying, but we have successfully started a shell in an image for the currently most recent Ubuntu release (17.04).

"},{"location":"software/software-table/","title":"Software","text":"Category Name Module Cluster Versions Licence Alignment blast blast rackham bianca miarka snowy 2.14.1+, 2.15.0+ Public Domain Alignment bowtie2 bowtie2 rackham bianca miarka snowy 2.5.2 GPL v3 Alignment cactus cactus rackham bianca miarka snowy 2.7.2, 2.8.2 Copyright Alignment ClipKIT ClipKIT rackham bianca miarka snowy 2.2.4 MIT Alignment CrossMap CrossMap rackham bianca miarka snowy 0.6.6 GPL v2+ Alignment diamond diamond rackham bianca miarka snowy 2.0.6, 2.1.9 GPL v3 Alignment fasta fasta rackham bianca miarka snowy 36.3.8i Apache 2.0 Alignment hmmer hmmer rackham bianca miarka snowy 3.4 custom open-source Alignment last last rackham bianca miarka snowy 1505 None Alignment lastz lastz rackham bianca miarka snowy 1.04.22 MIT Alignment mapAD mapAD rackham bianca miarka snowy 0.42.1 MIT Alignment mapcaller mapcaller rackham bianca miarka snowy 0.9.9.41 MIT Alignment MashMap MashMap rackham bianca miarka snowy 3.0.4 Mixed Alignment minimap2 minimap2 rackham bianca miarka snowy 2.26-r1175 MIT Alignment miniprot miniprot rackham bianca miarka snowy 0.12, 0.13 MIT Alignment MMseqs2 MMseqs2 rackham bianca miarka snowy 14-7e284, 15-6f452 GPL v3 Alignment ngmlr ngmlr rackham bianca miarka snowy 0.2.7-20210816-a2a31fb MIT Alignment pbmm2 pbmm2 rackham bianca miarka snowy 1.13.0 BSD 3 Alignment randfold randfold rackham bianca miarka snowy 2.0.1 None Alignment RMBlast RMBlast rackham bianca miarka snowy 2.2.28, 2.14.1+ Public domain Alignment skani skani rackham bianca miarka snowy 0.2.1 MIT Alignment SNAP-aligner SNAP-aligner rackham bianca miarka snowy 2.0.3 Apache 2.0 Alignment spaln spaln rackham bianca miarka snowy 3.0.3 GPL v2 and custom Alignment star star rackham bianca miarka snowy 2.7.8a, 2.7.11a MIT Alignment trf trf rackham bianca miarka snowy 4.10.0-rc.2 Gnu Afferoo GPL v3 Alignment US-align US-align rackham bianca miarka snowy 20230727-e5c6270 Custom open source AS-IS Alignment wfmash wfmash rackham bianca miarka snowy 0.12.5 MIT Annotation af2complex af2complex rackham bianca miarka snowy 1.4.0 None Annotation alphapulldown alphapulldown rackham bianca miarka snowy 1.0.4 GPL-3.0 Annotation augustus augustus rackham bianca miarka snowy 3.5.0, 3.5.0-20231223-33fc04d Artistic License 1.0 Annotation bali-phy bali-phy rackham bianca miarka snowy 4.0-beta15 None Annotation bcftools-score bcftools-score rackham bianca miarka snowy 1.18-20231207-9c8e21e MIT Annotation braker braker rackham bianca miarka snowy 3.0.3, 3.0.7, 3.0.8 Artistic 1.0 Annotation colabfold-local colabfold-local rackham bianca miarka snowy 1.5.5 MIT Annotation compleasm compleasm rackham bianca miarka snowy 0.2.2, 0.2.5, 0.2.6 Apache 2.0 and others Annotation CookHLA CookHLA rackham irma bianca miarka snowy 1.0.0 None Annotation DeepLoc DeepLoc rackham bianca miarka snowy 2.0 None Annotation dorado dorado rackham bianca miarka snowy 0.6.1, 0.8.2 PLC 1.0 Annotation duplex-tools duplex-tools rackham bianca miarka snowy 0.3.3 MPL 2.0 Annotation funannotate funannotate rackham bianca miarka snowy 1.8.1, 1.8.17 BSD-2 Annotation GeneMark GeneMark rackham bianca miarka snowy 4.69-es, 4.71-es, 4.72-es Custom Annotation GeneMark-ETP GeneMark-ETP rackham bianca miarka snowy 1.02-20231213-dd8b37b Creative Commons Attribution NonCommercial ShareAlike 4.0 License Annotation GenomeThreader GenomeThreader rackham bianca miarka snowy 1.7.4 Custom AS IS Annotation HATCHet HATCHet rackham bianca miarka snowy 2.0.1 BSD-3 Annotation HATK HATK rackham bianca miarka snowy 
2.0beta AS IS Annotation hybpiper hybpiper rackham bianca miarka snowy 2.1.6 GPLv3 Annotation igv-reports igv-reports rackham bianca miarka snowy 1.12.0-python3.9.5 MIT Annotation InterProScan None None 5.65-97.0, 5.67-99.0, .z None Annotation IsoQuant IsoQuant rackham bianca miarka snowy 3.3.1 Custom Annotation jcvi jcvi rackham bianca miarka snowy 1.3.8 BSD2 Annotation kmersGWAS kmersGWAS rackham irma bianca miarka snowy 20221010-a706bb7 None Annotation LDAK LDAK rackham bianca miarka snowy 5.2 Open source AS IS Annotation LOHHLA LOHHLA rackham bianca miarka snowy 20210129-00744c5 None Annotation LOHHLA-slagtermaarten LOHHLA-slagtermaarten rackham bianca miarka snowy 20200219-b38c477 None Annotation macse macse rackham bianca miarka snowy 2.07 None Annotation MakeHub MakeHub rackham bianca miarka snowy 1.0.5-20200210-1ecd6bb, 1.0.8-20240217-31cc299 GPL v3 Annotation metaeuk metaeuk rackham bianca miarka snowy 6-a5d39d9 GPL v3 Annotation OBITools OBITools rackham bianca miarka snowy 1.2.13 CeCILL Annotation OBITools3 OBITools3 rackham bianca miarka snowy 3.0.1b24 CeCILL Annotation omm-macse omm-macse rackham bianca miarka snowy 12.01 None Annotation ORFfinder ORFfinder rackham irma bianca miarka snowy 0.4.3 Public domain Annotation OrthoFinder OrthoFinder rackham bianca miarka snowy 2.5.2, 2.5.5 GPL v3 Annotation polysolver polysolver rackham bianca miarka snowy v4 None Annotation ProtHint ProtHint rackham bianca miarka snowy 2.6.0-20231027-103304c GeneMark license Annotation pullseq pullseq rackham bianca miarka snowy 20230518-7381691 None Annotation pVACtools pVACtools rackham bianca miarka snowy 3.1.1 BSD3 Annotation pycoQC pycoQC rackham bianca miarka snowy 2.5.2 GPL-3.0 Annotation RepeatMasker RepeatMasker rackham bianca miarka snowy 4.1.5 None Annotation sepp sepp rackham bianca miarka snowy 4.3.10_python_3.7.2, 4.5.1, 4.5.2 GPL v3 Annotation sequenza-utils sequenza-utils rackham bianca miarka snowy 3.0.0 None Annotation SignalP SignalP rackham bianca miarka snowy 6.0h None Annotation snpEff snpEff rackham bianca miarka snowy 5.2 MIT Annotation soprano soprano rackham bianca miarka snowy 20240418-938604e GPL-3.0 Annotation svtools svtools rackham bianca miarka snowy 0.5.1 MIT Annotation tabixpp tabixpp rackham bianca miarka snowy 1.1.2 MIT Annotation TEspeX TEspeX rackham bianca miarka snowy 2.0.1 GPL v3 Annotation TransDecoder TransDecoder rackham bianca miarka snowy 5.7.1-20230913-8b926ac None Annotation TrEMOLO TrEMOLO rackham bianca miarka snowy 2.2-beta1 GPL v3 Annotation trex trex rackham bianca miarka snowy 20230904-df86afe, 20231120-d9c840a None Annotation TSEBRA TSEBRA rackham bianca miarka snowy 1.1.2.4 Artistic License 2.0 Annotation vartrix vartrix rackham bianca miarka snowy 1.1.22 MIT Annotation vcf2maf vcf2maf rackham bianca miarka snowy 1.6.21 Apache 2.0 Annotation WhatsHap WhatsHap rackham bianca miarka snowy 2.3-20240529-be88057 MIT Assembly assembly-stats assembly-stats rackham bianca miarka snowy 1.0.1-20211102-c006b9c GPL v3 Assembly Flye Flye rackham bianca miarka snowy 2.9.5 BSD-3-Clause Assembly GetOrganelle GetOrganelle rackham bianca miarka snowy 1.7.3.3, 1.7.7.0 GPLv3 Assembly hifiasm hifiasm rackham bianca miarka snowy 0.16.1-r375, 0.19.8-r603, 0.20.0-r639 MIT Assembly hifiasm-meta hifiasm-meta rackham bianca miarka snowy 0.3.2-r74 MIT Assembly IPA IPA rackham irma bianca miarka snowy 1.8.0 BSD 3-clause Assembly L_RNA_scaffolder L_RNA_scaffolder rackham bianca miarka snowy 20190530-98f19e3 None Assembly MBG MBG rackham bianca miarka snowy 1.0.14 MIT 
Assembly metaMDBG metaMDBG rackham bianca miarka snowy 0.3-20240117-57f4493 MIT Assembly miniasm miniasm rackham bianca miarka snowy 0.3-r179-20191007-ce615d1 MIT Assembly Polypolish Polypolish rackham bianca miarka snowy 5.0 GPL-3.0 Assembly purge_dups purge_dups rackham bianca miarka snowy 1.2.5, 1.2.6 MIT Assembly Redundans Redundans rackham bianca miarka snowy 2.0.1 GPL v3 Assembly shovill shovill rackham bianca miarka snowy 1.0.0, 1.1.0 GPL-3.0 Assembly SKESA SKESA rackham bianca miarka snowy 2.4.0 Public domain Assembly spades spades rackham bianca miarka snowy 4.0.0 GPL v2 Assembly StringTie StringTie rackham bianca miarka snowy 2.2.1 MIT Assembly Trycycler Trycycler rackham bianca miarka snowy 0.5.4 GPL-3.0 Assembly vg vg rackham bianca miarka snowy 1.29.0, 1.48.0 mixed Bioinformatics alignment AGE AGE bianca irma milou rackham snowy 0.4 None Bioinformatics alignment GEM-Tools GEM-Tools rackham irma bianca snowy 1.7.1 None Bioinformatics alignment HISAT2 HISAT2 bianca irma rackham snowy 2.0.1-beta, 2.0.5, 2.1.0, 2.2.1 GPL v3 Bioinformatics alignment infernal infernal bianca miarka milou rackham snowy 1.0.2, 1.1.1, 1.1.2 BSD Bioinformatics alignment Kalign Kalign rackham irma bianca snowy 1.04, 2.04 None Bioinformatics alignment LEON-BIS LEON-BIS bianca irma milou rackham snowy 20130322 None Bioinformatics alignment MafFilter MafFilter bianca irma milou rackham snowy 1.1.2 None Bioinformatics alignment MAFFT MAFFT bianca irma rackham snowy 7.205, 7.245, 7.310, 7.407 BSD (main), mixed open-source (extensions) Bioinformatics alignment MUMmer MUMmer rackham irma bianca snowy 3.9.4alpha, 3.22, 3.23, 4.0.0beta2, 4.0.0rc1 Artistic License 2.0 Bioinformatics alignment unimap unimap rackham irma bianca snowy 0.1-r46-dirty MIT Bioinformatics annotation AAT AAT bianca irma milou rackham snowy r03052011 Custom \"AS IS\" Bioinformatics annotation GEMINI None None 0.16.3, 0.18.3, 0.19.0, 0.20.0, 0.20.1, .gemini_0.18.3 None Bioinformatics annotation HaMStR None None 13.2.3, .HaMStR, .HaMStR-old None Bioinformatics annotation InterProScan InterProScan bianca miarka rackham snowy 5.52-86.0 Misc Bioinformatics annotation libBigWig libBigWig rackham irma bianca snowy 0.4.4 MIT Bioinformatics annotation ProtHint ProtHint rackham miarka bianca snowy 2.4.0 GeneMark license https://github.com/gatech-genemark/ProtHint/blob/master/LICENSE Bioinformatics annotation tmhmm tmhmm rackham miarka bianca snowy 2.0c Custom as-is Bioinformatics annotation VIBRANT VIBRANT rackham irma bianca snowy 1.2.1 GNU General Public License Bioinformatics assembly A5-miseq None None 20140113, 20140604, 20160825, .A5-miseq None Bioinformatics assembly abyss abyss bianca irma milou rackham snowy 1.3.5, 1.3.5-max, 1.3.7, 1.3.7-k128, 1.3.7-max, 1.5.2, 1.9.0, 1.9.0-k128, 2.0.2, 2.0.2-k128 GPL v3 Bioinformatics assembly allpathslg None bianca irma rackham snowy 47300, 49618, 52485, 52488 None Bioinformatics assembly AMOS AMOS bianca irma milou rackham snowy 3.0.0, 3.1.0 Artistic Bioinformatics assembly ARC_assembler ARC_assembler milou 1.1.3 Apache 2.0 Bioinformatics assembly ARCS ARCS rackham irma bianca snowy 1.0.6, 1.1.1 GPL v3 Bioinformatics assembly BESST None None 1.0.4.3, 1.0.4.4, .besst None Bioinformatics assembly DBG2OLC DBG2OLC rackham snowy miarka bianca 20151208 None Bioinformatics assembly DISCOVARdenovo None bianca miarka rackham snowy 51885, 52488 None Bioinformatics assembly FALCON FALCON rackham miarka bianca snowy 0.3.0, 0.4.1, 2018.31.08-03.06 Clear BSD Bioinformatics assembly FALCON-integrate FALCON-integrate 
bianca miarka milou rackham snowy 20161113 Custom \"as is\" Bioinformatics assembly Flye Flye rackham miarka bianca snowy 2.3.5, 2.4.2, 2.8.1 -d Bioinformatics assembly GAA GAA rackham miarka bianca snowy 1.1 GPL v2+ Bioinformatics assembly GARM GARM bianca irma milou rackham snowy 0.7, 0.7.3 None Bioinformatics assembly IDBA None bianca miarka rackham snowy 1.1.1, 1.1.1-384, 1.1.3 None Bioinformatics assembly LINKS LINKS rackham miarka bianca snowy 1.8.7 GPL v3 Bioinformatics assembly MaSuRCA MaSuRCA rackham miarka bianca snowy 2.0.3.1, 2.1.0, 2.2.1, 2.3.2, 3.1.3, 3.2.1, 3.2.2, 3.2.3, 3.3.5, 3.4.2 GPL v3 Bioinformatics assembly MetAMOS MetAMOS bianca irma milou rackham snowy 1.5rc3 GPLv2 and other open source Bioinformatics assembly Metassembler Metassembler milou 1.5 open-source Bioinformatics assembly MHAP MHAP milou 1.6 Apache 2.0 Bioinformatics assembly Platanus-allee Platanus-allee bianca irma rackham snowy 2.0.2 GPL v3 Bioinformatics assembly quickmerge quickmerge rackham irma bianca snowy 0.3-9233726 GPL v3 Bioinformatics assembly Unicycler Unicycler bianca irma rackham snowy 0.4.8 GPL v3 Bioinformatics assembly wtdbg2 wtdbg2 rackham irma bianca snowy 2.4 GPL v3 Bioinformatics misc AdapterRemoval AdapterRemoval bianca irma milou rackham snowy 2.1.7, 2.2.2 GPL v3 Bioinformatics misc AdmixTools AdmixTools bianca irma milou rackham snowy 5.0-20170312, 5.0-20171024, 7.0.1, 20160803 Custom \"as-is\" open source Bioinformatics misc ADMIXTURE ADMIXTURE bianca irma milou rackham snowy 1.3.0 Not open source Bioinformatics misc AlienTrimmer AlienTrimmer bianca irma milou rackham snowy 0.4.0 GPL Bioinformatics misc ANGSD ANGSD rackham irma bianca snowy 0.917, 0.917-g6522d3e, 0.921, 0.933 None Bioinformatics misc Athlates Athlates bianca irma milou rackham snowy 20140426 Custom Bioinformatics misc ATLAS_aDNA ATLAS_aDNA bianca irma milou rackham snowy 20170510 None Bioinformatics misc ATSAS ATSAS bianca irma milou rackham snowy 2.8.3-1 Academic use only Bioinformatics misc bam2fastx bam2fastx rackham irma bianca snowy 1.3.0-80dbf79 BSD 3-clause Bioinformatics misc bambam bambam bianca irma rackham snowy 1.4 MIT Bioinformatics misc Bamsurgeon Bamsurgeon rackham irma bianca snowy 1.3 MIT License Bioinformatics misc bcftools bcftools bianca irma rackham snowy 1.8, 1.10 \"MIT/Expat or GPL v3\" Bioinformatics misc BEETL BEETL bianca irma milou rackham snowy 1.0.2, 1.1.0 BSD 2-Clause Bioinformatics misc bgen bgen rackham irma bianca snowy 1.1.4 Boost Software License v1.0 Bioinformatics misc BioPerl BioPerl bianca miarka rackham snowy 1.6.924_Perl5.18.4, 1.7.1_Perl5.24.1, 1.7.2_Perl5.24.1, 1.7.2_Perl5.26.2 Perl Bioinformatics misc bonito None rackham irma bianca snowy 0.3.7-cpu, 0.3.8, 0.4.0 Oxford Nanopore Public License 1.0 Bioinformatics misc BraCeR BraCeR rackham irma bianca snowy 2019_10_03_22e49cb Apache 2.0 Bioinformatics misc cdbfasta cdbfasta bianca irma rackham snowy 1.00 \"Artistic 2.0\" Bioinformatics misc cellranger-ATAC None bianca irma rackham snowy 1.2.0, 2.0.0 None Bioinformatics misc CheckM CheckM bianca irma rackham snowy 1.0.11, 1.0.12, 1.1.3 GPL v3 Bioinformatics misc CITE-seq-Count CITE-seq-Count rackham irma bianca snowy 1.4.3 MIT Bioinformatics misc CleaveLand4 CleaveLand4 bianca irma milou rackham snowy 4.3 GPL v3 Bioinformatics misc CNV-seq CNV-seq milou 20140812 Misc open source Bioinformatics misc CNVnator CNVnator bianca irma milou rackham snowy 0.3.2, 0.3.3 Creative Commons Public License Bioinformatics misc CONCOCT CONCOCT 
rackham irma bianca snowy 0.4.0, 0.4.0~, 0.5.0, 1.1.0 FreeBSD Bioinformatics misc Corset Corset bianca irma milou rackham snowy 1.04, 1.07 GPL v3 Bioinformatics misc DATES DATES rackham irma bianca snowy 753 None Bioinformatics misc DosageConvertor DosageConvertor rackham irma bianca snowy 1.0.4 custom Bioinformatics misc DWGSIM DWGSIM bianca irma milou rackham snowy 0.1.11-6e9a361 GPL v2 Bioinformatics misc EnsEMBL-API EnsEMBL-API rackham irma bianca snowy 87, 94 Apache License 2.0 Bioinformatics misc EPACTS EPACTS bianca miarka milou rackham snowy 3.2.6, 3.2.6_milou, 3.3.0-a5209db GPL v3 Bioinformatics misc ExpansionHunter None bianca irma rackham snowy 2.5.3, 2.5.5 None Bioinformatics misc FastANI FastANI rackham irma bianca snowy 1.2 Apache 2.0 Bioinformatics misc fastp fastp rackham irma bianca snowy 0.20.0, 0.23.1 MIT Bioinformatics misc fcGENE fcGENE rackham irma bianca snowy 1.0.7 GPL Bioinformatics misc Filtlong Filtlong rackham irma bianca snowy 0.2.0 GPL v3 Bioinformatics misc FLASH FLASH bianca irma milou rackham snowy 1.2.11 GPL Bioinformatics misc FusionCatcher None rackham irma bianca snowy 1.00, 1.10, 1.33 GPLv3 Bioinformatics misc GEM GEM bianca irma rackham snowy 2.7, 3.4 Research only Bioinformatics misc GEMMA GEMMA rackham irma bianca snowy 0.98.1 GPL3 Bioinformatics misc GeneMark GeneMark rackham miarka bianca snowy 2.3-es, 4.32-es, 4.33-es, 4.33-es_Perl5.24.1, 4.38-es, 4.57-es, 4.62-es, 4.68-es Custom Bioinformatics misc Genepop Genepop rackham irma bianca snowy 4.7 CeCILL (GPL compatible) Bioinformatics misc GenomeTools GenomeTools rackham irma bianca snowy 1.5.8, 1.5.9, 1.6.1 ISC Bioinformatics misc GERP++ GERP++ bianca irma milou rackham snowy 20110522 GPL v3+ Bioinformatics misc GTOOL GTOOL milou 0.7.5 \"As is\" open source Bioinformatics misc HiCUP HiCUP rackham irma bianca snowy 0.7.2 GPL v3 or later Bioinformatics misc htslib htslib bianca irma rackham snowy 1.8, 1.10 \"MIT/Expat and modified 3-clause BSD\" Bioinformatics misc IGV None bianca irma rackham snowy 2.3.17, 2.3.40, 2.3.92, 2.4.2, 2.8.13 None Bioinformatics misc IGVtools IGVtools bianca irma milou rackham snowy 2.3.17, 2.3.40, 2.3.91, 2.3.98 LGPL Bioinformatics misc IM IM bianca irma milou rackham snowy 20091217 None Bioinformatics misc IMa2p IMa2p milou 2015-08-09 GPL v3 Bioinformatics misc IMPUTE2 IMPUTE2 bianca irma milou rackham snowy 2.3.2 \"As is\" open source Bioinformatics misc ITSx None None 1.0.9, 1.0.11, 1.1-beta, .itsx None Bioinformatics misc KAT KAT bianca irma rackham snowy 2.0.4, 2.0.6, 2.0.8, 2.1.1, 2.3.4, 2.4.2, 2.4.2_py3.5.0, 2.4.2_py3.7.2 GPL v3 Bioinformatics misc KmerGenie None None 1.6741, 1.7039, .kmergenie None Bioinformatics misc Kraken None rackham irma bianca miarka snowy 0.10.5-beta, 1.0, 1.1-352e780, 1.1.1, 1.1.1-20210927-375654f GPL v3 Bioinformatics misc Kraken2 Kraken2 bianca irma rackham snowy 2.0.8-beta MIT Bioinformatics misc Krona Krona bianca miarka milou rackham snowy 2.7 Custom open source Bioinformatics misc LASER LASER bianca irma rackham snowy 2.01, 2.02, 2.04 GPL v3 Bioinformatics misc LatentStrainAnalysis LatentStrainAnalysis milou 20160322 MIT Bioinformatics misc LDhelmet LDhelmet bianca irma rackham snowy 1.7, 1.9, 1.10 GPL v3 Bioinformatics misc LTR_Finder LTR_Finder bianca irma rackham snowy 1.0.5, 1.0.7 Free for non-commercial use Bioinformatics misc LUMPY LUMPY rackham irma bianca snowy 0.2.12, 0.2.13, 0.2.13-97cf18c, 
0.2.13-213a417, 0.3.0 MIT Bioinformatics misc MACE MACE milou 1.2 None Bioinformatics misc MACS MACS bianca miarka rackham snowy 2.1.0, 2.1.2, 2.2.6, 3.0.0a6 BSD 3-clause Bioinformatics misc MAGeCK MAGeCK rackham irma bianca snowy 0.5.6, 0.5.9.4 BSD 3-clause Bioinformatics misc medaka medaka rackham irma bianca snowy 0.7.1 MPL 2.0 Bioinformatics misc Meerkat Meerkat milou 0.189 Misc non-commercial open source Bioinformatics misc MetaBat MetaBat bianca irma milou rackham snowy 0.26.3, 2.12.1 BSD-like Bioinformatics misc METAL METAL rackham miarka bianca snowy 2011-03-25 open source Bioinformatics misc MetaSV MetaSV bianca irma milou rackham snowy 0.5.4 BSD 2-clause Bioinformatics misc Metaxa2 Metaxa2 rackham irma bianca snowy 2.1.3, 2.2 GPL v3 Bioinformatics misc MethPipe MethPipe rackham irma bianca snowy 3.4.3, 4.1.1 GPL v3+ Bioinformatics misc MethylDackel MethylDackel rackham irma bianca snowy 0.2.1, 0.5.1 MIT Bioinformatics misc Mothur Mothur bianca miarka rackham snowy 1.40.5, 1.41.0 GPL v3 Bioinformatics misc ms ms rackham irma bianca snowy 20071014 open source Bioinformatics misc MultiQC MultiQC bianca irma rackham snowy 0.6, 0.7, 0.8, 1.8, 1.9, 1.10 MIT Bioinformatics misc NanoComp NanoComp bianca irma rackham snowy 1.9.2 GPL v3 Bioinformatics misc NanoPlot NanoPlot rackham irma bianca snowy 1.33.1 GPL3 license Bioinformatics misc NPStat NPStat rackham irma bianca snowy 1 GPL v3 license Bioinformatics misc nseg nseg rackham irma bianca snowy 1.0.1 Public Domain Bioinformatics misc ont_fast5_api ont_fast5_api rackham irma bianca snowy 3.1.6 Mozilla Public License 2.0 Bioinformatics misc PCAngsd PCAngsd bianca miarka rackham snowy 0.982 GPL v3 Bioinformatics misc PennCNV PennCNV milou 20151014 Public domain Bioinformatics misc phantompeakqualtools phantompeakqualtools bianca miarka milou rackham snowy 1.1 MIT Bioinformatics misc phaser phaser rackham irma bianca snowy 20210423-5d4926d GNU Bioinformatics misc piper piper bianca miarka rackham snowy 1.5.1 MIT Bioinformatics misc pizzly pizzly rackham irma bianca snowy 0.37.3 BSD 2-clause Bioinformatics misc plink2 plink2 rackham irma bianca snowy 2.00-alpha-2-20180704, 2.00-alpha-2-20190429, 2.00-alpha-2.3-20200124 GPL v3 Bioinformatics misc Porechop Porechop rackham irma bianca snowy 0.2.4 GPL v3 Bioinformatics misc prodigal prodigal bianca irma milou rackham snowy 2.6.3 GPL v3 Bioinformatics misc PRSice PRSice rackham irma bianca snowy 2.2.11.b GPL3 Bioinformatics misc psmc psmc rackham irma bianca snowy 0.6.5-r67-e5f7df5 MIT Bioinformatics misc RECON RECON rackham irma bianca snowy 1.08 GPL Bioinformatics misc RegScan RegScan rackham irma bianca snowy 0.5 None Bioinformatics misc RepeatScout RepeatScout rackham irma bianca snowy 1.0.5, 1.0.6 open source Bioinformatics misc schmutzi schmutzi rackham irma bianca snowy 20160424, 20200706-597c6bc GPL v3 Bioinformatics misc SeqAn None bianca irma rackham snowy 1.4.2, 2.4.0 None Bioinformatics misc SeqKit SeqKit rackham irma bianca snowy 0.15.0 MIT license Bioinformatics misc seqmonk seqmonk bianca irma milou rackham snowy 0.21.0, 0.27.0, 0.32.1, 0.34.1, 1.36.0, 1.37.1 GPL Bioinformatics misc SMC++ SMC++ rackham irma bianca snowy 1.15.2 GPL v3 Bioinformatics misc snippy snippy rackham miarka bianca snowy 4.0, 4.0.5, 4.6.0 GPL v2 license Bioinformatics misc spaceranger spaceranger bianca irma rackham snowy 1.0.0, 1.2.0 other Bioinformatics misc SweeD SweeD rackham irma bianca snowy 4.0.0 GPL v3 license Bioinformatics misc tabix tabix bianca 
miarka rackham snowy 0.2.6 MIT/Expat and modified 3-clause BSD Bioinformatics misc Tombo Tombo rackham irma bianca snowy 1.5.1 Mozilla Public License 2.0 Bioinformatics misc vawk vawk rackham irma bianca snowy 0.0.1 None Bioinformatics misc velocyto velocyto rackham irma bianca snowy 0.17.17 BSD 2-clause Bioinformatics phylogeny CONSEL CONSEL bianca irma milou rackham snowy 0.20 None Bioinformatics phylogeny ExaBayes ExaBayes bianca irma rackham snowy 1.5, 1.5-mpi GPL v3 Bioinformatics phylogeny FastML FastML bianca irma rackham snowy 3.1, 3.11 GPL v2+ Bioinformatics phylogeny FastTree FastTree bianca irma milou rackham snowy 2.1.8, 2.1.10 Open source \"as is\" Bioinformatics phylogeny HyPhy HyPhy rackham irma bianca snowy 2.5.0, 2.5.0-mpi Custom open-source \"as is\" Bioinformatics phylogeny MEGAN None None 4.70.4, 5.1.5, 5.11.3, 6.3.5, 6.10.5, 6.20.17, .MEGAN None Bioinformatics phylogeny raxml None None 7.0.4, 7.2.7, 7.2.8, 7.3.0, 7.4.7, 8.0.20, 8.0.20-mpi, 8.2.0-gcc, 8.2.0-gcc-mpi, 8.2.0-icc, 8.2.0-icc-mpi, 8.2.0_gcc, 8.2.0_icc, 8.2.4-gcc, 8.2.4-gcc-mpi, 8.2.10-gcc, 8.2.10-gcc-mpi, 8.2.12-gcc, 8.2.12-gcc-mpi, .raxml None Bioinformatics phylogeny RAxML-NG RAxML-NG bianca irma rackham snowy 0.9.0-mpi GNU Affero GPL v3.0 Bioinformatics pipelines fermikit fermikit bianca irma milou rackham snowy 0.14-prerelease-96f7820, r178 None Bioinformatics pipelines nf-core nf-core rackham irma bianca snowy 1.12.1, 1.14, 2.1 MIT License Bioinformatics pipelines nf-core-pipelines nf-core-pipelines rackham irma bianca snowy latest MIT License Bioinformatics sw collections 454-dataanalysis 454-dataanalysis bianca irma milou rackham snowy 2.3, 2.5.3, 2.6, 2.9 None Bioinformatics sw collections 454-dataprocessing 454-dataprocessing bianca irma milou rackham snowy 2.3 None Bioinformatics sw collections ART ART rackham irma bianca snowy 2016-06-05 None Bioinformatics sw collections BEDOPS BEDOPS rackham irma bianca snowy 2.4.3, 2.4.28, 2.4.39 GPL v2 Bioinformatics sw collections BEDTools BEDTools rackham irma bianca snowy 2.21.0, 2.23.0, 2.25.0, 2.26.0, 2.27.1, 2.29.2 MIT Bioinformatics sw collections BioScope BioScope bianca irma rackham snowy 1.3.1 Commercial Bioinformatics sw collections CASAVA CASAVA bianca miarka milou rackham snowy 1.7.0, 1.8.2 None Bioinformatics sw collections cellranger None bianca irma rackham snowy 1.1.0, 1.3.0, 2.0.2, 2.2.0, 3.0.1, 4.0.0, 5.0.1, 6.0.2 None Bioinformatics sw collections cellranger-ARC cellranger-ARC bianca irma rackham snowy 1.0.0 other Bioinformatics sw collections cellranger-DNA cellranger-DNA bianca irma rackham snowy 1.1.0 other Bioinformatics sw collections GATK GATK bianca irma milou rackham snowy 3.5.0, 3.6, 3.7, 3.8-0, 4.1.0.0, 4.1.1.0, 4.beta.5, build-20160727 BSD Style Bioinformatics sw collections GATK-Queue GATK-Queue bianca irma milou rackham snowy 3.2.2, 3.6, 3.7, 3.8-0 MIT + academic non-commercial Bioinformatics sw collections GenomeSTRiP GenomeSTRiP bianca irma milou rackham snowy 2.00.1650, 2.00.1685, 2.00.1710 Custom \"as is\" redistributable Bioinformatics sw collections HiSeq HiSeq bianca irma milou rackham snowy 0.9 None Bioinformatics sw collections longranger longranger bianca irma rackham snowy 2.0.1, 2.1.1, 2.1.2, 2.1.4, 2.2.2 other Bioinformatics sw collections MEMEsuite MEMEsuite bianca irma rackham snowy 5.0.1 BSD Style Bioinformatics sw collections supernova None bianca irma rackham snowy 1.0.0, 1.1.1, 1.1.4, 2.0.0, 2.1.1 None Chemistry/physics ABINIT ABINIT bianca irma rackham snowy 
8.10.3 GPL v3 Chemistry/physics ALPS ALPS rackham irma bianca snowy 2.3.0 ALPS Library License version 1.0, ALPS Application License version 1.0 Chemistry/physics DOCK DOCK bianca irma milou rackham 3.7 Free Academic License Chemistry/physics gromacs None None 4.5.5, 4.5.5_intel, 4.5.7.th, 4.6.3, 4.6.3_rackham, 4.6.5, 4.6.5.th, 4.6.5_rackham, 4.6.7.th, 4.6.7.th.dp, 5.0.4, 5.0.4_rackham, 5.0.7, 5.1.1, 5.1.1_rackham, 5.1.5, 2016.1, 2016.6, 2018.6, 2018.6.th, 2019.1, 2019.1.th, 2019.6.th, 2020-GPU, 2021.1.th, .4.6 None Chemistry/physics molcas molcas milou 7.8.082, 8.0.15-03-08 user group license Chemistry/physics molden molden rackham 5.1 None Chemistry/physics TmoleX TmoleX bianca irma milou rackham 18 proprietary group license Compilers and build tools ant ant bianca irma rackham snowy 1.9.8, 1.10.0 Apache 2.0 Compilers and build tools autoconf autoconf bianca irma milou rackham snowy 2.68, 2.69 GPL Compilers and build tools automake automake bianca irma rackham snowy 1.14.1, 1.16.1 GPL Compilers and build tools binutils binutils rackham bianca miarka snowy 2.26, 2.28, 2.38, 2.39, 2.41 GPL v3 Compilers and build tools ddt ddt bianca irma rackham snowy 3.2.1, 5.0.1, 6.0, 6.1, 7.0 Commercial Compilers and build tools flex flex rackham irma bianca snowy 2.6.4 modified BSD Compilers and build tools fpc fpc rackham irma bianca snowy 3.0.4 GPL and LGPL Compilers and build tools git git rackham bianca miarka snowy 2.5.0, 2.10.2, 2.16.1, 2.21.0, 2.24.0, 2.28.0, 2.34.1, 2.44.0 LGPL 2.1 Compilers and build tools git-lfs git-lfs rackham bianca miarka snowy 2.9.1, 3.5.1 MIT Compilers and build tools guile guile bianca irma milou rackham snowy 1.8.8 LGPL 2.1 Compilers and build tools java java bianca irma milou rackham snowy jdk, OpenJDK, OpenJDK_11.0.2, OpenJDK_12+32, OpenJDK_17+35, OracleJDK, OracleJDK_11.0.9, sun_jdk1.6.0_04, sun_jdk1.6.0_18, sun_jdk1.6.0_45, sun_jdk1.7.0_25, sun_jdk1.8.0_40, sun_jdk1.8.0_92, sun_jdk1.8.0_151 Other Compilers and build tools julia julia rackham bianca miarka snowy 0.3.0-prerelease+3043, 0.3.11, 0.4.6, 1.1.1, 1.4.2, 1.6.1, 1.9.3 MIT Compilers and build tools libtool libtool bianca irma milou rackham snowy 2.4.6 GPL Compilers and build tools maven maven bianca irma rackham snowy 3.6.0 Apache 2.0 Compilers and build tools meson meson rackham bianca miarka snowy 0.49.2, 0.57.2, 1.1.0 None Compilers and build tools mono mono bianca irma milou rackham snowy 3.12.0, 5.8.1.0, 5.10.1.27 Custom open source Compilers and build tools ninja ninja rackham irma bianca snowy 1.9.0, 1.10.0 Apache 2.0 Compilers and build tools patchelf patchelf bianca miarka milou rackham snowy 0.1, 0.8 GPL v3 Compilers and build tools perl perl rackham bianca miarka snowy 5.18.2, 5.18.4, 5.22.2, 5.24.0, 5.24.1, 5.26.2, 5.32.1 GPL v1+ or Artistic License Compilers and build tools perl6 perl6 bianca irma rackham snowy rakudo-star-2017.04, rakudo-star-2019.03 Artistic License 2.0 Compilers and build tools perl_modules perl_modules rackham bianca miarka snowy 5.18.4, 5.24.1, 5.26.2, 5.32.1 GPL v1+ or Artistic License Compilers and build tools python None None 2.5, 2.6, 2.6.1, 2.6.5, 2.6.6, 2.7, 2.7.1, 2.7.2, 2.7.4, 2.7.6, 2.7.9, 2.7.11, 2.7.15, 2.7i, 3.1, 3.1.2, 3.1.3, 3.2, 3.2.4, 3.3, 3.3.1, 3.4.3, 3.5.0, 3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5, 3.10.8, 3.11.8, 3.12.1, 3.12.7, python-rpath.tcl, python-set-LD_LIBRARY_PATH.tcl, python-set-LD_LIBRARY_PATH_new.tcl None Compilers and build tools python3 None None 3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5, 3.11.4, 3.11.8, 3.12.1, 3.12.7, 
python3-rpath.tcl None Compilers and build tools ruby ruby bianca irma rackham snowy 2.4.1, 2.5.0, 2.6.2 Ruby license Compilers and build tools subversion None rackham irma bianca snowy 1.9.3, 1.10.6 Apache License Version 2.0 Engineering matlab None bianca rackham snowy 7.4, 7.8, 7.10, 7.13, 8.0, 8.1, .matlab, .matlab_ny, .matlab_ny~, R2014a, R2015a, R2015b, R2016a, R2017a, R2018a, R2018b, R2019a, R2020b None Geospatial CDO CDO rackham irma bianca snowy 1.9.5, 1.9.7.1, 1.9.7.1-intel18.3 GPL v2 Geospatial ecCodes ecCodes rackham irma bianca snowy 2.13.1 None Geospatial FYBA FYBA bianca irma milou rackham snowy 4.1.1 Custom open-source \"as is\" Geospatial GEOS GEOS rackham bianca miarka snowy 3.5.0, 3.9.1-gcc9.3.0, 3.12.0-gcc12.3.0 LGPL v2.1 Geospatial GOTM GOTM bianca irma rackham snowy 5.3-221-gac7ec88d GPL v2 Geospatial libgeotiff libgeotiff rackham bianca miarka snowy 1.4.1, 1.4.3, 1.7.1 Mixed Geospatial Magics Magics rackham irma bianca snowy 3.3.1, 3.3.1-intel18.3 None Geospatial NCO NCO bianca irma rackham snowy 4.8.1, 4.9.2, 4.9.3 BSD Geospatial PROJ None snowy rackham bianca irma 6.3.2, 8.1.0 MIT Geospatial PROJ.4 PROJ.4 bianca irma rackham snowy 4.9.2, 4.9.3, 4.9.3-intel18.3 MIT Geospatial QGIS QGIS rackham bianca miarka snowy 3.4.12, 3.32.3 GPL license Geospatial SHAPELIB SHAPELIB rackham irma bianca snowy 1.5.0 None Libraries ATLAS ATLAS bianca irma milou rackham snowy 3.10.3 BSD-style Libraries blas blas bianca irma milou rackham snowy 3.6.0 Open source Libraries boost boost rackham bianca miarka snowy 1.41.0_gcc9.3.0, 1.44.0, 1.45.0, 1.55.0, 1.55.0_gcc4.8.3, 1.58.0-gcc8.3.0, 1.59.0_gcc4.9.2, 1.59.0_intel15.3, 1.60.0_gcc5.3.0, 1.61.0_gcc5.3.0, 1.63.0_gcc6.3.0, 1.63.0_gcc6.3.0_mpi2.0.2, 1.63.0_intel17.1, 1.66.0, 1.66.0-gcc8.3.0, 1.70.0_gcc9.1.0, 1.70.0_gcc9.1.0_mpi3.1.3, 1.70.0_gcc9.3.0, 1.70.0_gcc9.3.0_mpi3.1.5, 1.70.0_intel18.3, 1.70.0_intel18.3_intelmpi18.3, 1.70.0_intel18.3_mpi3.1.3, 1.75.0-gcc9.3.0, 1.78.0_gcc11.2.0, 1.78.0_gcc11.2.0_mpi4.1.2, 1.79.0_gcc11.2.0_mpi4.1.2, 1.81.0-gcc10.3.0, 1.83.0-gcc12.3.0, 1.83.0-gcc12.3.0-mpi4.1.5 Boost licence Libraries bzip2 bzip2 rackham irma bianca miarka snowy 1.0.6, 1.0.8 Custom open-source \"AS IS\" Libraries cairo cairo rackham irma bianca snowy 1.14.8, 1.14.12, 1.17.2, 1.17.4 LGPL v2.1 or Mozilla Public License 1.1 Libraries deal.II deal.II bianca irma rackham snowy 9.1.1-gcc, 9.1.1-intel GNU LGPL v2.1 or later Libraries fftw fftw rackham irma bianca snowy 3.3.8 GPL Libraries freetype freetype rackham bianca miarka snowy 2.6, 2.7.1, 2.10.1, 2.12.1 Freetype, GPL v2 Libraries giflib giflib bianca irma milou rackham snowy 5.1.4 Custom \"as is\" Libraries glpk glpk bianca irma rackham snowy 4.63, 4.65 GPL Libraries gsl gsl rackham bianca miarka snowy 1.16, 2.1, 2.3, 2.5, 2.6, 2.7 GPL Libraries hdf4 hdf4 rackham irma bianca snowy 4.2.11_gcc4.9.2, 4.2.14-gcc6.3.0 None Libraries hdf5 hdf5 rackham bianca miarka snowy 1.8.16_gcc4.9.2, 1.8.16_gcc5.3.0, 1.8.18, 1.8.18_gcc6.3.0, 1.10.1, 1.10.5, 1.10.5-threadsafe-intel18.3, 1.10.9, 1.14.0 HDF5 License Libraries jemalloc jemalloc rackham bianca miarka snowy 3.6.0, 5.0.1, 5.3.0 Custom open-source Libraries libcurl libcurl rackham bianca miarka snowy 7.45.0, 8.4.0 None Libraries libharu libharu bianca irma milou rackham snowy 2.3.0 ZLIB/LIBPNG License Libraries libwebp libwebp rackham bianca miarka snowy 1.0.3, 1.2.0, 1.3.0 BSD 3-clause Libraries lpsolve lpsolve rackham irma bianca snowy 5.5.2.9 LGPL v2 Libraries netcdf netcdf bianca irma rackham snowy 4.7.1, 4.7.1-intel18.3 Custom open source 
\"as is\" Libraries NLopt NLopt rackham irma bianca snowy 2.6.1 MIT Libraries openblas openblas rackham bianca miarka snowy 0.2.14a, 0.2.19, 0.2.19-singlethread, 0.2.20, 0.2.20-openmp, 0.2.20-singlethread, 0.3.21, 0.3.26 BSD 3-clause Libraries pcre pcre bianca irma milou rackham snowy 8.40 BSD 3-clause Libraries PLplot PLplot rackham irma bianca snowy 5.15.0, 5.15.0-old_wxWidgets LGPL v2 Libraries Poppler Poppler rackham bianca miarka snowy 0.43.0, 0.54.0, 0.75.0, 23.02.0, 23.09.0 GPL v2 Libraries protobuf protobuf rackham bianca miarka snowy 3.11.4, 24.3-gcc12.3.0 Google open-source AS IS Libraries pslib pslib rackham irma bianca snowy 0.4.6 GPL and LGPL Libraries slurm-drmaa slurm-drmaa rackham bianca miarka snowy 1.1.2-slurm19.05.8, 1.1.4-slurm23.02.5 GPL-3.0 Libraries sparsehash None rackham irma bianca miarka snowy 2.0.2, 2.0.3, 2.0.4 BSD 3-clause Libraries szip None milou snowy 2.1.1, 2.1_gcc4.9.2, 2.1_gcc5.3.0 None Libraries tbb tbb milou 4.4u1_gcc4.9.2, 4.4u1_intel15.3 GPL v2 Libraries UDUNITS UDUNITS bianca irma rackham snowy 2.2.26 Custom open source \"as is\" Libraries wxWidgets wxWidgets rackham irma bianca snowy 3.1.3-gtk2, 3.1.3-gtk3, 3.1.3-qt5 wxWindows Library Licence 3.1, https://github.com/wxWidgets/wxWidgets/blob/master/docs/licence.txt Libraries Yeppp Yeppp bianca irma milou rackham snowy 1.0.0 Custom open source \"as is\" Libraries zlib zlib rackham bianca miarka snowy 1.2.8, 1.2.11, 1.2.13, 1.3 None Misc ABSOLUTE ABSOLUTE rackham bianca miarka snowy 2023-6c98496 BSD 3 Misc AdapterRemoval AdapterRemoval rackham bianca miarka snowy 2.3.1, 2.3.4 GPLv3 Misc AFNI AFNI rackham bianca miarka snowy 24.3.08 None Misc AGAT AGAT rackham bianca miarka snowy 1.0.0, 1.3.2 GPL v3 Misc alleleCount alleleCount rackham bianca miarka snowy 4.2.1 AGPL v3 Misc ANGSD ANGSD rackham bianca miarka snowy 0.700, 0.902, 0.915, 0.940-stable GPL v2 Misc any2fasta any2fasta rackham bianca miarka snowy 0.4.2 GPL v3.0 Misc Arlequin Arlequin rackham irma bianca miarka snowy 3.5.2.2 None Misc ARPIP ARPIP rackham bianca miarka snowy 2023.10.02-ee32c10 None Misc bamtools bamtools rackham bianca miarka snowy 2.5.2 MIT Misc bamUtil bamUtil rackham bianca miarka snowy 1.0.15 GPL v3 Misc bcftools bcftools rackham bianca miarka snowy 1.2, 1.19 None Misc Beagle Beagle rackham bianca miarka snowy 4.1 GPL v3 Misc bgen bgen rackham bianca miarka snowy 1.1.6 Boost Software License v1.0 Misc BioBakery BioBakery rackham bianca miarka snowy 3.0, 3.1, 3.8 MIT license Misc BioKIT BioKIT rackham irma bianca miarka snowy 0.0.9 Other-d Misc BioPerl None None 1.6.1, 1.6.1_PERL5.10.1, 1.6.1_PERL5.12.3, 1.6.922, 1.6.923_Perl5.18.4, 1.7.8-perl5.32.1, .BioPerl None Misc BUSCO BUSCO rackham bianca miarka snowy 4.1.4, 5.3.1, 5.5.0, 5.7.1 MIT Misc Cellsnp-lite Cellsnp-lite rackham irma bianca miarka snowy 1.2.2 Apache License 2-0 Misc CHEUI CHEUI rackham bianca miarka snowy 20230518-c1c9ab6 GPL Misc Chromium-cellranger-ATAC cellranger-ATAC rackham bianca miarka snowy 1.2.0, 2.0.0, 2.1.0 None Misc Citup Citup rackham irma bianca miarka snowy 0.1.0 Other-d Misc CRABS CRABS rackham irma bianca miarka snowy 0.1.2 MIT License-d Misc CRISPResso CRISPResso rackham bianca miarka snowy 1.0.7, 2.3.1 None Misc DamageProfiler DamageProfiler rackham bianca miarka snowy 1.1 GPL v3 Misc datamash datamash rackham bianca miarka snowy 1.8 None Misc DATES DATES rackham bianca miarka snowy 4010 None Misc dds-cli dds-cli rackham bianca miarka snowy milou transit latest Other Misc deepTools deepTools rackham bianca miarka snowy 3.5.5 mixed open-source 
Misc EAGLE EAGLE rackham bianca miarka snowy 1.1.3 GPL v3 Misc FAN-C FAN-C rackham bianca miarka snowy 0.9.26 None Misc FastANI FastANI rackham bianca miarka snowy 1.33, 1.34 Apache-2.0 Misc fastK fastK rackham bianca miarka snowy 1.1.0 AS IS Misc fastp fastp rackham bianca miarka snowy 0.23.4 MIT Misc fgbio fgbio rackham bianca miarka snowy 2.2.1-0 MIT Misc fineRADstructure fineRADstructure rackham bianca miarka snowy 0.3.1 Creative Commons 3.0 Misc FLAMES FLAMES rackham bianca miarka snowy 20221109-774e16a GPL v3 Misc FLASH FLASH rackham bianca miarka snowy 2.2.00 GPL v3 Misc freebayes freebayes rackham bianca miarka snowy 1.3.6, 1.3.8 MIT Misc fsl fsl rackham bianca miarka snowy 6.0, 6.0.3 None Misc gffread gffread rackham bianca miarka snowy 0.12.7 MIT Misc GLIMPSE GLIMPSE rackham bianca miarka snowy 1.1.1, 2.0.0 MIT Misc grenedalf grenedalf rackham bianca miarka snowy 0.3.0, 0.5.1, 0.6.0 GPL3 Misc GTDB-Tk GTDB-Tk rackham bianca miarka snowy 0.3.2, 1.5.0, 2.3.2, 2.4.0 GPLv3 Misc Guppy Guppy snowy miarka bianca 5.0.16-cpu, 5.0.16-gpu, 6.0.6-cpu, 6.0.6-gpu, 6.3.7-cpu, 6.3.7-gpu, 6.4.2-cpu, 6.4.2-gpu, 6.5.7-cpu, 6.5.7-gpu Custom Misc halla halla rackham bianca miarka snowy 0.8.20 MIT Misc HiCExplorer HiCExplorer rackham bianca miarka snowy 2.2-beta, 3.7.3 GPL v3 Misc htslib htslib rackham bianca miarka snowy 1.2, 1.19 None Misc HUMAnN HUMAnN rackham bianca miarka snowy 3.6, 3.8 MIT license Misc IGVtools IGVtools rackham bianca miarka snowy 2.8.13, 2.16.0 MIT Misc Juicebox Juicebox rackham irma bianca miarka snowy 1.11.08 MIT License Misc KING KING rackham bianca miarka snowy 2.3.2 None Misc kingfisher kingfisher rackham bianca miarka snowy 0.3.0 GPL v3 Misc KMC KMC rackham bianca miarka snowy 3.2.2 GPL v3 Misc KneadData KneadData rackham bianca miarka snowy 0.12.0 MIT license Misc Kraken2 Kraken2 rackham bianca miarka snowy 2.1.3-20231102-acc2248 MIT Misc KrakenUniq KrakenUniq rackham bianca miarka snowy 0.6, 1.0.0, 1.0.1 GPLv3, MIT Misc Krona Krona rackham bianca miarka snowy 2.7.1, 2.8.1-20211222-d1479b3 None Misc ldsc ldsc rackham bianca miarka snowy 1.0.0, 1.0.1, 2.0.1 GPL v3 Misc MACS MACS rackham bianca miarka snowy 1.4.1, 1.4.2, 3.0.0b1 BSD 3-clause Misc MAGeCK2 MAGeCK2 rackham irma bianca miarka snowy 20211209-435eacd BSD3 Misc mash mash rackham bianca miarka snowy 2.0, 2.3, 2.3-20210519-41ddc61 Custom open-source Misc meryl meryl rackham bianca miarka snowy 1.4.1 Mixed open source Misc METAL METAL rackham bianca miarka snowy 2020-05-05 open source Misc MetaXcan MetaXcan rackham irma bianca miarka snowy 20210925-cfc9e36 MIT License-d Misc metilene metilene rackham bianca miarka snowy 0.2-8 GPL v2 Misc mgatk mgatk rackham bianca miarka snowy 0.7.0 MIT Misc Minimac4 Minimac4 rackham bianca miarka snowy 4.1.6 GPL-3.0 Misc mirdeep2 mirdeep2 rackham bianca miarka snowy 2.0.1.3-20220221-c6440e2 GPL v3 Misc miRDP2 miRDP2 rackham bianca miarka snowy 1.1.4, 1.1.5 GPL v3 Misc modkit modkit rackham bianca miarka snowy 0.2.5-rc2, 0.3.1, 0.3.3, 0.4.1 GPL v3 Misc mosdepth mosdepth rackham bianca miarka snowy 0.3.3 MIT Misc Mothur Mothur rackham bianca miarka snowy 1.25.1, 1.30.1, 1.33.3, 1.36.1, 1.38.1, 1.48.0 None Misc MRIcroGL MRIcroGL rackham bianca miarka snowy 1.2.20220720 BSD 2 Misc msisensor-pro msisensor-pro rackham bianca miarka snowy 1.2.0 None Misc MultiQC MultiQC rackham bianca miarka snowy 1.22.2 MIT Misc NCBI-datasets NCBI-datasets rackham bianca miarka snowy 15.29.0, 16.35.0 Public Domain, US Government Misc ont_h5_validator ont_h5_validator rackham bianca miarka snowy 2.0.1 None 
Misc picard picard rackham bianca miarka snowy 3.1.1 MIT Misc Pisces Pisces rackham bianca miarka snowy 5.3.0.0 GPL 3 Misc plink2 plink2 rackham bianca miarka snowy 2.00-alpha-3.7-20221024, 2.00-alpha-5-20230923 GPL v3 Misc preseq preseq rackham bianca miarka snowy 3.2 GPL v3 Misc PROJ PROJ rackham bianca miarka snowy 9.1.1 MIT Misc PyClone-VI PyClone-VI rackham irma bianca miarka snowy 20210623-6607ea1 GPL V3-d Misc pyega3 pyega3 rackham bianca miarka snowy 5.1.0 None Misc Raremetal Raremetal rackham bianca miarka snowy 4.15.1 None Misc regenie regenie rackham bianca miarka snowy 3.4.1 MIT Misc removethis removethis rackham irma bianca miarka snowy 1 None Misc RepeatModeler RepeatModeler rackham bianca miarka snowy 2.0.4 None Misc RGT RGT rackham bianca miarka snowy 1.0.2 GPL v3 Misc rtgcore rtgcore rackham bianca miarka snowy 3.12.1 Custom Misc rtgtools rtgtools rackham bianca miarka snowy 3.12.1 BSD 2-clause Misc samtools samtools rackham bianca miarka snowy 1.2, 1.19 None Misc schmutzi schmutzi rackham bianca miarka snowy 1.5.7 GPL v3 Misc scikit-allel scikit-allel rackham bianca miarka snowy 1.3.5 None Misc scvi-tools scvi-tools rackham bianca miarka snowy 1.0.4 BSD-3 Misc SeqKit SeqKit rackham bianca miarka snowy 2.4.0 MIT Misc SeqLib SeqLib rackham bianca miarka snowy 1.2.0 Apache 2.0 Misc seqstats seqstats rackham bianca miarka snowy 20170404-e6f482f MIT Misc SHAPEIT SHAPEIT rackham bianca miarka snowy v4.2.2, v5.1.1 None Misc slivar slivar rackham irma bianca miarka snowy 0.2.7 MIT License-d Misc SMC++ SMC++ rackham bianca miarka snowy 1.15.4, 1.15.5.dev12+g8bdecdf GPL v3 Misc smudgeplot smudgeplot rackham bianca miarka snowy 0.3, 0.4.0 Apache-2.0 Misc sracat sracat rackham bianca miarka snowy 20210916-b896745 BSD 3-Clause Misc sratools sratools rackham bianca miarka snowy 3.0.7 custom Misc Stacks Stacks rackham bianca miarka snowy 2.66 GPL v3 Misc SVDB SVDB rackham bianca miarka snowy 2.8.1 MIT Misc truvari truvari rackham bianca miarka snowy 4.3.1 MIT Misc vcflib vcflib rackham bianca miarka snowy 1.0.9 MIT Misc vcfstats vcfstats rackham bianca miarka snowy 0.4.2 Other Misc vep vep rackham bianca miarka snowy 110.1, 111.0, 113.0 Apache-2.0 Misc XP-CLR XP-CLR rackham bianca miarka snowy 1.1.2 MIT Misc applications agrep agrep bianca irma milou rackham snowy 3.41.5 ISC Open Source Misc applications awscli awscli rackham bianca miarka snowy 1.11.140, 1.16.225, 1.29.52 Apache 2.0 Misc applications circos circos rackham irma bianca snowy 0.69-9 GPL Misc applications cmake cmake rackham irma bianca snowy 3.5.1, 3.7.2, 3.13.2, 3.17.3 BSD 3-clause Misc applications cowsay cowsay bianca irma milou rackham snowy 3.03 Artistic License Misc applications Cromwell Cromwell rackham bianca miarka snowy 71, 86 BSD 3-Clause Misc applications doxygen doxygen bianca irma milou rackham snowy 1.8.11 GNU GPL 2 Misc applications emacs emacs rackham bianca miarka snowy 25.1, 25.2, 27.2, 28.2 GNU Misc applications gawk gawk bianca irma milou rackham snowy 4.1.4 GPL v3 Misc applications gdl gdl bianca irma rackham snowy 1.0.0-rc.1 GPL v2 Misc applications GhostPDL GhostPDL rackham irma bianca snowy 9.53.3 GNU Affero GPL v3 Misc applications gnuplot None None 4.4.3, 4.6.5, 5.0.7, 5.2.7, .gnuplot None Misc applications Graphviz Graphviz rackham bianca miarka snowy 2.40.1, 9.0.0 Common Public License Version 1.0 Misc applications groff groff bianca irma milou rackham 1.22.3 GPL Misc applications h5utils h5utils bianca irma milou rackham snowy 1.12.1 GPL v2+ Misc applications haskell-stack None bianca irma 
milou rackham snowy 1.0.4.3, 1.4.0, 1.7.1 None Misc applications jq jq rackham miarka bianca snowy 1.6 MIT Misc applications MariaDB MariaDB bianca irma milou rackham snowy 10.2.11 GPL v2 Misc applications mbuffer mbuffer bianca irma milou rackham snowy 20151002 GPL v3 Misc applications mcl mcl rackham bianca miarka snowy 14-137, 22-282 GPL v3 Misc applications metaWRAP metaWRAP rackham irma bianca snowy 1.3.2 MIT Misc applications openbabel openbabel rackham bianca miarka snowy 3.1.1-gcc9.3.0, 3.1.1-gcc12.3.0 GPL v2 Misc applications OpenBUGS OpenBUGS rackham irma bianca snowy 3.2.3 GPL v2 Misc applications p7zip p7zip bianca irma milou rackham snowy 16.02 LGPLv2 Misc applications pandoc pandoc rackham irma bianca snowy 1.16.0.2, 2.2.3.2, 2.10.1 GPL 2 Misc applications PostgreSQL PostgreSQL bianca irma milou rackham snowy 10.3 PostgreSQL Licence Misc applications povray povray bianca irma rackham 3.7 AGPL v3 Misc applications ROOT ROOT rackham irma bianca snowy 6.04.08, 6.06.08, 6.20.04 LGPL 2.1 Misc applications SAIGE SAIGE rackham irma bianca snowy 0.42.1 GPL v3 Misc applications SCons None None 2.5.0, .scons None Misc applications sqlite sqlite rackham bianca miarka snowy 3.8.5, 3.11.1, 3.16.2, 3.24.0, 3.34.0, 3.45.0 public domain Misc applications swig swig rackham bianca miarka snowy 3.0.7, 3.0.12, 4.1.1 GPL with other advice Misc applications texinfo texinfo rackham bianca miarka snowy 6.0, 6.5, 6.6, 6.8, 7.1 GPL v3+ Misc applications texlive texlive rackham bianca miarka snowy 2015, 2016, 2018, 2019, 2021, 2023-08-14, 2024-04-24 None Misc applications tinyutils None None 1.1, 1.2, 1.3, 1.4, .tinyutils-1.1 None Misc applications tmux tmux rackham bianca miarka snowy 2.5, 3.1b, 3.3a Custom open-source AS IS Misc applications vim None rackham irma bianca snowy 8.0-1360, 8.1-1053, 8.2.3701 VIM License Misc applications xz xz rackham bianca miarka snowy 5.2.2, 5.2.6, 5.4.5 Mixed open-source Parallel gnuparallel gnuparallel rackham bianca miarka snowy 20140222, 20150522, 20170122, 20180822, 20230422 GPL v3+ Phylogeny Dsuite Dsuite rackham bianca miarka snowy 0.5-r57 open-source Phylogeny FastME FastME rackham bianca miarka snowy 2.1.6.2, 2.1.6.4 GPL Phylogeny FastTree FastTree rackham bianca miarka snowy 2.1.11 GPL v2+ Phylogeny G-Nomix G-Nomix rackham bianca miarka snowy 2022-09-18-de952a2 free for academic use Phylogeny HyPhy HyPhy rackham bianca miarka snowy 2.5.51-mpi None Phylogeny iqtree iqtree rackham bianca miarka snowy 2.2.2.6-omp-mpi GPL v2 Phylogeny KIN KIN rackham bianca miarka snowy 3.1.3, 3.1.3-20230612-76dc469 GPL v3 Phylogeny MetaPhlAn4 MetaPhlAn4 rackham bianca miarka snowy 4.0 MIT license Phylogeny paml paml rackham bianca miarka snowy 4.10.7 other Phylogeny pathPhynder None rackham irma bianca snowy 1.a-20221011-a407a97, 2020-12-19-b8532c0 MIT Phylogeny phylip phylip rackham bianca miarka snowy 3.697 None Phylogeny PhyloPhlAn PhyloPhlAn rackham bianca miarka snowy 3.0.3 MIT license Phylogeny phyx phyx rackham bianca miarka snowy 1.3 GPL-3.0 Phylogeny read2tree read2tree rackham bianca miarka snowy 0.1.5-20240117-ff2d167 MIT Phylogeny snphylo snphylo rackham bianca miarka snowy 20180901 None Pipelines 3D-DNA 3D-DNA rackham irma bianca miarka snowy 20190801-529ccf4 MIT License Pipelines biomodal biomodal rackham bianca miarka snowy 1.0.2 commercial Pipelines cactus_atac cactus_atac rackham bianca miarka snowy 1.0.0 MIT Pipelines cutadapt cutadapt rackham bianca miarka snowy 4.5, 4.8 MIT Pipelines GenoPredPipe GenoPredPipe rackham bianca miarka snowy 20211104-02777ce, 
20221121-e3caf6b GNU GPL v3 Pipelines happy happy rackham bianca miarka snowy 1.0.0 MIT Pipelines Juicer Juicer rackham bianca miarka snowy 1.6, 2.0 MIT License Pipelines Juicer_tools Juicer_tools rackham irma bianca miarka snowy 1.6, 1.22.01 MIT License Pipelines MPRASuite MPRASuite rackham bianca miarka snowy 1.0.3 MIT Pipelines nextNEOpi nextNEOpi rackham bianca miarka snowy 1.4.0 https://github.com/icbi-lab/nextNEOpi/blob/master/LICENSE Pipelines nf-core nf-core rackham bianca miarka snowy 2.4.1, 2.6, latest MIT License Pipelines Panaroo Panaroo rackham bianca miarka snowy 1.2.10, 1.3.2 MIT Pipelines ParseBiosciences-Pipeline ParseBiosciences-Pipeline rackham bianca miarka snowy 1.4.0 Custom Pipelines qiime2 qiime2 rackham bianca miarka snowy 2024.1, 2024.2 BSD 3-Clause Pipelines SALSA SALSA rackham irma bianca miarka snowy 20220408-ed76685 MIT License Pipelines Seurat Seurat rackham bianca miarka snowy 5.0.2, 5.1.0 MIT Pipelines SMRT SMRT rackham bianca miarka snowy 13.0.0.207600 None Pipelines snakemake snakemake rackham bianca miarka snowy 5.32.2, 8.20.1 MIT License Pipelines star-fusion star-fusion rackham bianca miarka snowy 1.9.1, 1.10.1 BSD 3-clause Pipelines ViWrap ViWrap rackham bianca miarka snowy 1.3.0 GPL v3 Statistics nonmem None rackham irma bianca snowy 7.3.0, 7.4.3, 7.4.4, 7.5.0 Commercial Statistics PsN PsN rackham irma bianca snowy 5.0.0 GPL 2+ Statistics R None None 2.8, 2.8.1, 2.10, 2.10.1, 2.11.1, 2.12.1, 2.12.2, 2.13.0, 2.14.0, 2.15.0, 2.15.1, 2.15.2, 3.0.1, 3.0.2, 3.1.0, 3.2.2, 3.2.3, 3.3.0, 3.3.1, 3.3.2, 3.3.2_rackham, 3.4.0, 3.4.3, 3.5.0, 3.5.2, 3.6.0, 3.6.1, 4.0.0, 4.0.4, 4.1.1, 4.2.1, 4.3.1, 4.3.2, 4.4.1, .R, R.tcl None Statistics R_packages R_packages rackham bianca miarka snowy 3.3.0, 3.3.1, 3.3.2, 3.4.0, 3.4.3, 3.5.0, 3.5.2, 3.6.0, 3.6.1, 4.0.0, 4.0.4, 4.1.1, 4.3.1 Various Statistics RStudio RStudio rackham bianca miarka snowy 1.0.136, 1.0.143, 1.0.153, 1.1.423, 1.1.463, 1.4.1106, 2022.02.0-443, 2022.02.3-492, 2022.07.1-554, 2023.06.0-421, 2023.06.2-561, 2023.12.1-402, 2024.04.2-764 Custom Sw_collections bbmap bbmap rackham bianca miarka snowy 39.06, 39.08 Public Domain Sw_collections BEDOPS BEDOPS rackham bianca miarka snowy 2.4.41 GPL v2 Sw_collections BEDTools BEDTools rackham bianca miarka snowy 2.20.1, 2.31.1 MIT Sw_collections cellbender cellbender rackham bianca miarka snowy 0.3.0 BSD-3 Sw_collections Chromium-cellranger cellranger rackham bianca miarka snowy 4.0.0, 5.0.1, 6.0.2, 6.1.2, 7.0.0, 7.0.1, 7.1.0, 8.0.1 None Sw_collections Chromium-cellranger-ARC cellranger-ARC rackham bianca miarka snowy 1.0.0, 2.0.2 https://support.10xgenomics.com/docs/license Sw_collections DRAGEN DRAGEN rackham bianca miarka snowy 4.1.5 None Sw_collections GATK GATK rackham bianca miarka snowy 1.0.4105, 1.0.5365, 1.0.5909, 1.2.12, 1.4.5, 1.4.21, 1.5.11, 1.5.21, 2.1.13, 2.3.6, 2.5.2, 2.7.2, 2.8.1, 3.1.1, 3.2.0, 3.2.2, 3.3.0, 3.4-46, 3.4.0, 3.4.46, 4.0.8.0, 4.1.4.1, 4.3.0.0 None Sw_collections MEMEsuite MEMEsuite rackham bianca miarka snowy 4.11.1, 4.11.2_1, 5.1.1, 5.5.1 University of California open-source AS IS Sw_collections SpeedPPI SpeedPPI rackham bianca miarka snowy 2023.07.11-37d0a03 None Uncategorized alphafold alphafold rackham bianca miarka snowy 2.3.1 None Uncategorized alphafold_dataset alphafold_dataset rackham bianca miarka snowy 2.0.0, 2.1.1, 2.3.1 None Uncategorized Amber None snowy 16-GPU, 18-GPU None Uncategorized amber_previous None None amber.tcl, ambertools18_intel_python2 None Uncategorized AmpliconNoise None bianca miarka rackham snowy 1.27 None 
Uncategorized AnnotSV None bianca irma rackham snowy 1.1.1 None Uncategorized annovar_data None bianca miarka rackham snowy 2019.10.21, 2021.05.18 None Uncategorized Ansys Ansys rackham bianca miarka snowy 19.1, 19.5, 2020R1, 2023R2 commercial software Uncategorized apr None rackham irma bianca snowy 1.7.0 Apache License 2.0 Uncategorized apr-util None rackham irma bianca snowy 1.6.1 Apache License Version 2.0 Uncategorized archspec archspec rackham bianca miarka snowy 0.2.4 Apache 2.0, MIT Uncategorized aria2 aria2 rackham bianca miarka snowy 1.36 OpenSSL Uncategorized Armadillo None rackham irma bianca snowy 7.900.0, 9.700.2 Apache License V2.0 Uncategorized ascp ascp rackham bianca miarka snowy 4.2.0.183804, 4.4.3.891 None Uncategorized athena None bianca miarka rackham snowy 1.1 None Uncategorized BackSPIN None rackham irma bianca snowy 20171211_2fbcf5d BSD 2-Clause Uncategorized BAM-matcher None None 20160611, .bammatcher, .bammatcher~ None Uncategorized Bandage None milou 0.8.0 None Uncategorized bazel bazel rackham bianca miarka snowy 7.0.0-pre.20230917.3 None Uncategorized bcftools-MoCha None rackham irma bianca snowy 1.9-20191129, 1.11-20210315, 2019-11-29 None Uncategorized BclConverter None bianca irma milou rackham snowy 1.7.1 None Uncategorized BioBakery_data BioBakery_data rackham bianca miarka snowy 3.0-20210423, 3.1-20231102 None Uncategorized BisSNP None None 0.82.2, .bissnp None Uncategorized BLAKE2 BLAKE2 rackham bianca miarka snowy 20230212-ed1974e CC0-1.0 or OpenSSL or Apache Public Licence 2.0 Uncategorized Bracken None rackham irma bianca snowy 2.5 GPL v3 Uncategorized bubblewrap bubblewrap rackham irma bianca miarka snowy 0.6.2 LGPL v2+ Uncategorized BUSCO_data BUSCO_data rackham bianca miarka snowy latest None Uncategorized CAMMiQ None rackham irma bianca miarka snowy 20211015-6142150 MIT License Uncategorized CAP3 None milou 08-06-13, .CAP3, CAP3 None Uncategorized capnproto capnproto rackham bianca miarka snowy 1.0.2 MIT Uncategorized CAT None rackham irma bianca snowy 20190926-e25443 Apache License 2.0 Uncategorized cellranger-ARC-data None bianca irma rackham snowy 2020-A None Uncategorized cellranger-ATAC-data None bianca irma rackham snowy 1.2.0, 2.0.0 None Uncategorized cellranger-data cellranger-data rackham bianca miarka snowy 1.1.0, 1.2.0, 3.0.0, 2020-A, 2024-A None Uncategorized cellranger-DNA-data None bianca irma rackham snowy 1.0.0 None Uncategorized cellranger-VDJ-data cellranger-VDJ-data rackham bianca miarka snowy 4.0.0, 5.0.0, 7.1.0 None Uncategorized chain_files chain_files rackham bianca miarka snowy 20230825 Open access Uncategorized ChEMBL ChEMBL rackham bianca miarka snowy 22.1, 33 Creative Commons Attribution-ShareAlike 3.0 Unported license Uncategorized ChimeraSlayer None bianca miarka rackham snowy 20110519 None Uncategorized Chromium-cellranger-DNA None rackham irma bianca snowy 1.1.0 https://support.10xgenomics.com/docs/license Uncategorized Chromium-longranger None rackham irma bianca snowy 2.2.2 Misc non-commercial Uncategorized Chromium-spaceranger None rackham irma bianca snowy 1.0.0 None Uncategorized circlator None rackham irma bianca snowy 1.5.5 GNU General Public License v3.0 Uncategorized clapack clapack rackham bianca miarka snowy 3.2.1 Public domain Uncategorized clearcut None bianca irma rackham snowy 1.0.9 None Uncategorized ClonalFrameML None bianca irma milou rackham snowy 1.11-4f13f23 None Uncategorized conda None rackham irma bianca snowy latest \u00a9 Copyright 2017, Anaconda, Inc. Revision 6be5194b. 
Uncategorized coreutils None bianca irma milou rackham snowy 8.25, 8.27 None Uncategorized CPLEXOptimizationStudio None rackham irma bianca miarka snowy 12.9.0, 20.1 IBM Academic Uncategorized CROP None rackham irma bianca miarka snowy 1.33 Other-d Uncategorized CS-Rosetta None rackham irma bianca snowy 1.01_Rosetta_3.7 None Uncategorized CST_Studio CST_Studio rackham bianca miarka snowy 2023.0 None Uncategorized CTAT_RESOURCE_LIB None bianca miarka rackham snowy 2017-11, 2018-02, 2019-08 None Uncategorized cuda cuda rackham bianca miarka snowy 11.2.1, 11.7.0, 11.8.0, 12.0.0, 12.1.0, 12.1.1, 12.2.0, 12.2.2 EULA Uncategorized cuDNN cuDNN rackham bianca miarka snowy 8.1.0.77, 8.4.1.50, 8.6.0.163, 8.7.0.84, 8.8.0.121, 8.9.2.26 SLA Uncategorized CUT-RUNTools None rackham irma bianca miarka snowy 2 MIT Uncategorized cyrus-sasl None rackham irma bianca snowy 2.1.27 None Uncategorized darsync darsync rackham bianca miarka snowy 20240208-7ff09d9 None Uncategorized dbCAN dbCAN rackham bianca miarka snowy 11 None Uncategorized DBdeployer DBdeployer rackham irma bianca miarka snowy latest Apache-2 Uncategorized ddt-perf None milou rackham 7.0 None Uncategorized desmond desmond rackham irma bianca miarka snowy 2022-2 gpl Uncategorized Dfam Dfam rackham bianca miarka snowy 3.7, 3.8 Creative Commons Zero CC0 Uncategorized Dfam-TE-Tools None rackham irma bianca snowy 1.4 CC0 1.0 Universal Uncategorized DFTB+ None rackham snowy 19.1 GNU LGPL-3 Uncategorized dlib None bianca irma rackham snowy 19.15 None Uncategorized dssp None None 2.0.4 None Uncategorized edlib edlib rackham bianca miarka snowy 1.2.7 MIT Uncategorized Eigen None bianca irma rackham snowy 3.3.4 None Uncategorized eLSA None bianca irma milou rackham 20160907-febe2d7a57c8 None Uncategorized ESPResSo None rackham 4.1.4 GPLv3 Uncategorized estout None rackham irma bianca snowy 20200417-d392e71 MIT license Uncategorized fastq_screen_data fastq_screen_data rackham bianca miarka snowy 20220330 None Uncategorized freesurfer freesurfer rackham bianca miarka snowy 6.0.0, 7.4.1 Custom open-source Uncategorized funannotate_data funannotate_data rackham bianca miarka snowy 1.8.17 BSD-2 Uncategorized GAAS None rackham irma bianca miarka snowy 1.2.0 GPL v3 Uncategorized gamess None bianca irma rackham 20070324R1, 20070324R1-kalkyl, 20101001R1-kalkyl-intel-12.1-mvapich2, 20110818R1-kalkyl-intel12.0-mvapich2, 20170930 None Uncategorized gaussian None milou rackham snowy .g09, .gaussian, .gv, .gv~, g03, g03.d02, g03.e01, g09, g09.a02, g09.c01, g09.d01 None Uncategorized gaussview None None 5.0.8, .gv, .gv~ None Uncategorized gcc None None 4.2.3, 4.3.0, 4.3.2, 4.3.2-test, 4.3.2t, 4.4, 4.4.2, 4.4.3, 4.4.4, 4.5.0, 4.6.2, gcc, gcc4.3, gcc4.4, gcc4.5, gcc4.6.tcl None Uncategorized gdb None rackham irma bianca miarka snowy 11.2 Other-d Uncategorized gdc None None 2.064.2_4.8, .gdc None Uncategorized genomescope None rackham irma bianca snowy 1.0.0_d2aefdd Apache-2.0 Uncategorized genomescope2.0 None rackham irma bianca snowy 1.0.0_5034ed4 Apache-2.0 Uncategorized GetOrganelleDB GetOrganelleDB rackham bianca miarka snowy 0.0.1, 0.0.2 GPLv3 Uncategorized github-cli github-cli rackham bianca miarka snowy 2.63.2 MIT Uncategorized glib glib rackham irma bianca miarka snowy 2.72.1 LGPL 2.1+ Uncategorized GlimmerHMM None rackham irma bianca snowy 3.0.4 Open Source, licence file included Uncategorized GMP GMP rackham bianca miarka snowy 6.3.0 LGPL v3; GPL v2 Uncategorized go go rackham bianca miarka snowy 1.11.5, 1.20.3 Go licence Uncategorized GoogleCloudSDK 
GoogleCloudSDK rackham bianca miarka snowy 447.0.0, 455.0.0 None Uncategorized grocsvs None rackham irma bianca snowy 0.2.5 MIT Uncategorized gromacs-plumed None rackham 2019.4.th None Uncategorized gromacs-saxs None rackham 2020.04-9edbdbc None Uncategorized gstreamer None milou 0.10.31 None Uncategorized GTDB GTDB rackham bianca miarka snowy R04-RS89, R202, R214.1, R220 None Uncategorized Gurobi Gurobi rackham bianca miarka snowy 9.5.1, 10.0.2, 11.0.3 Academic License Uncategorized gzip gzip rackham bianca miarka snowy 1.12, 1.13 GPL v3 Uncategorized HG002_Q100_T2T_assembly HG002_Q100_T2T_assembly rackham bianca miarka snowy 0.7 CC0 Uncategorized HHsuite None None 2.0.16, 2.0.16~ None Uncategorized HLA-LA None rackham irma bianca snowy 1.0.1-20201001-f636b62 None Uncategorized hyperqueue hyperqueue rackham 0.18.0 MIT license Uncategorized iGenomes None rackham irma bianca snowy latest None Uncategorized ImageJ ImageJ rackham bianca miarka snowy 1.52j, 1.54g None Uncategorized IMAP None rackham irma bianca snowy 1.0 None Uncategorized Immcantation None rackham irma bianca snowy 4.0.0 AGPL-3 Uncategorized imputor None bianca miarka rackham snowy 20180829 None Uncategorized InterOp None bianca miarka rackham snowy 1.1.4 None Uncategorized jamovi jamovi rackham bianca miarka snowy 2.3.21 Mixed Uncategorized jansson None rackham irma bianca snowy 2.12 MIT Uncategorized JasPer None bianca irma rackham snowy 2.0.14 None Uncategorized JsonCpp JsonCpp rackham bianca miarka snowy 1.9.5 Dual Public Domain/MIT Uncategorized kermit None rackham irma bianca snowy 1.0 MIT Uncategorized Kraken_data None bianca irma rackham snowy latest None Uncategorized KrakenUniq_data KrakenUniq_data rackham irma bianca miarka snowy latest GPLv3, MIT Uncategorized KyotoTycoon None bianca irma rackham snowy stable-20170410 None Uncategorized Label-Studio Label-Studio rackham bianca miarka snowy 1.10.1dev Apache-2.0 Uncategorized lammps None rackham 29Oct2020_gcc_openmpi, 30Jul2016_intel_mkl, 31Mar2017_gcc_openmpi None Uncategorized libarchive libarchive rackham bianca miarka snowy 3.6.2 New BSD Uncategorized libb2 libb2 rackham bianca miarka snowy 0.98.1 CC0-1.0 Uncategorized libdeflate libdeflate rackham bianca miarka snowy 1.19 MIT Uncategorized libicu None rackham irma bianca miarka snowy 5.2-4 MIT and UCD and public domain Uncategorized liblzma None milou 5.2.2 None Uncategorized librdf librdf rackham bianca miarka snowy 1.0.17 LGPL 2.1, GPL 2, Apache 2 Uncategorized librsvg None rackham irma bianca snowy 2.48.4 LGPL 2.1 Uncategorized libSBML libSBML rackham bianca miarka snowy 5.20.2 LGPL v2.1 Uncategorized libtiff libtiff rackham bianca miarka snowy 4.5.0 AS IS Uncategorized libvips libvips rackham bianca miarka snowy 8.15.2 LGPL-2.1 Uncategorized lighter None bianca irma rackham snowy 1.1.1 None Uncategorized LLVM None rackham irma bianca miarka snowy 13.0.1 Various Uncategorized LoFreq None None 2.1.2, .lofreq None Uncategorized longranger-data None bianca irma rackham snowy 2.0.0, 2.1.0 None Uncategorized LRSDAY None rackham irma bianca snowy 1.5.0 MIT Uncategorized LTR_retriever None rackham irma bianca snowy 2.9.0 GPL v3 Uncategorized lz4 None rackham irma bianca snowy 1.9.2 BSD 2-Clause & GPLv2 Uncategorized lzo None rackham irma bianca snowy 2.1 GPL v2+ Uncategorized m4 None bianca irma milou rackham snowy 1.4.17 None Uncategorized mathematica None rackham snowy 11.2 None Uncategorized mauve None bianca miarka milou rackham snowy 2015-02-13 None Uncategorized mesos None rackham irma bianca snowy 1.9.0 
Apache-2.0 Uncategorized metaMATE None rackham irma bianca miarka snowy 20220327-3cdacd7 Other-d Uncategorized metaSNV None rackham irma bianca miarka snowy 1.0.3, 2.0.1 misc free software Uncategorized miniconda2 None irma rackham snowy 4.5.4 None Uncategorized miniconda3 None irma rackham snowy 4.5.4 None Uncategorized miniforge miniforge rackham bianca miarka snowy latest None Uncategorized mlst None bianca miarka rackham snowy 2.12 None Uncategorized MMseqs2_data MMseqs2_data rackham bianca miarka snowy 20230125, 20230816, 20240202 None Uncategorized Moses Moses rackham bianca miarka snowy 4.0 LGPL v2.1 Uncategorized mp-tools mp-tools rackham bianca miarka snowy latest None Uncategorized MPFR MPFR rackham bianca miarka snowy 4.2.1 LGPL v3+ Uncategorized mpjexpress None milou .mpjexpress, v0.38 None Uncategorized MultiBUGS None rackham miarka bianca snowy 2.0 LGPL v3.0 Uncategorized MUMPS MUMPS rackham 5.5.0, 5.5.0-hybrid CeCILL-C license Uncategorized nanopolish None rackham irma bianca snowy 0.12.0 MIT Uncategorized ncbi_taxonomy None rackham irma bianca miarka snowy latest None Uncategorized ncftp None bianca irma rackham snowy 3.2.6 None Uncategorized ncview None rackham snowy 2.1.7, 2.1.7-intel-2019b GPL 3 license Uncategorized ngsF-HMM None milou 20160614 None Uncategorized NINJA None rackham irma bianca snowy 0.97-cluster_only MIT Uncategorized NMRPipe None rackham irma bianca snowy 10.4 https://www.ibbr.umd.edu/nmrpipe/terms.html Uncategorized nullarbor None bianca miarka rackham snowy 2.0.20180819 None Uncategorized octave None bianca irma rackham snowy 5.1.0 None Uncategorized opam opam rackham irma bianca miarka snowy 2.1.2 LGPL 2.1 with special modifications Uncategorized OpenFOAM None rackham irma bianca snowy 6, 7, v1912 GNU GPLv3 Uncategorized OpenJPEG OpenJPEG rackham bianca miarka snowy 2.3.0, 2.5.0 BSD 2-clause Uncategorized openslide openslide rackham bianca miarka snowy 4.0.0 LGPL Uncategorized ORCA None None 4.0.1.2, 4.2.1, 5.0.4, 6.0.0, 6.0.1, .4.0.1.2 None Uncategorized panther panther rackham bianca miarka snowy 14.1, 15.0, 17.0, 18.0 Custom Uncategorized paraview None None 5.6, 5.9.1, .paraview_noQT, .paraview_noQT~ None Uncategorized PCAP None milou PCAP None Uncategorized pcre2 pcre2 rackham bianca miarka snowy 10.42 PCRE2 Licence Uncategorized PGAP None rackham irma bianca miarka snowy 2022-02-10.build5872 Mixed Uncategorized PHP PHP rackham bianca miarka snowy 7.4.32 PHP License v.3.0.1 Uncategorized PhylogicNDT None rackham irma bianca miarka snowy 2020 None Uncategorized phylophlan None bianca irma milou rackham snowy 0.99 None Uncategorized pigz pigz rackham bianca miarka snowy 2.4, 2.8 Custom AS IS Uncategorized plumed None rackham irma bianca snowy 2.6.0, 2.7.1-gcc LGPL-3.0 Uncategorized pm-tools pm-tools rackham bianca miarka snowy latest None Uncategorized python_GIS_packages python_GIS_packages rackham bianca miarka snowy 3.10.8 Various Uncategorized python_ML_packages python_ML_packages snowy 3.9.5-cpu, 3.9.5-gpu, 3.11.8-cpu, 3.11.8-gpu Various Uncategorized PyTorch PyTorch rackham bianca miarka snowy 1.12.0-cpu BSD-modified Uncategorized qctool None rackham bianca irma snowy 2-5559450, 2-beta None Uncategorized QoRTs None rackham irma bianca snowy 1.3.6 MIT Uncategorized QuantumESPRESSO None rackham 6.7MaX-Release GNU GPLv2 Uncategorized rawcopy None rackham irma bianca snowy 1.1 None Uncategorized RDKit RDKit rackham irma bianca miarka snowy 20220301 BSD 3-Clause-d Uncategorized readline None rackham irma bianca miarka snowy 6.2-11 GPL v3 
Uncategorized rfmix None bianca irma rackham snowy v2 None Uncategorized Rosetta None rackham irma bianca snowy 3.7, 2019.4 https://els.comotion.uw.edu/licenses/86 Uncategorized RSPt None rackham 2019-08-30, 2020-06-10 GPL2 license Uncategorized rspt None None 1951_intel_mkl, 2003_intel_mkl, 2018-03-31_intel_mkl, rspt.tcl None Uncategorized rust rust rackham bianca miarka snowy 1.43.1, 1.67.0, 1.77.0 Apache 2.0\\, MIT Uncategorized sbcl None None 1.0.12, .sbcl None Uncategorized scikit-learn None rackham irma bianca snowy 0.22.1 BSD 3 Uncategorized sda-cli sda-cli rackham bianca miarka snowy 0.1.0, 0.1.3 AGPL-3.0 Uncategorized SDSL None rackham irma bianca miarka snowy 2.1.1 GPLv3 Uncategorized Siesta None rackham irma bianca snowy 4.1-b4, 4.1-MaX-1.0 GNU GPLv3 Uncategorized siesta None None 3.2-pl4_intel_mkl, 4.1-b3_intel_mkl, siesta-2018.tcl, siesta.tcl None Uncategorized silva None bianca irma rackham snowy 128, 132, 138.1 None Uncategorized simpleitk None rackham irma bianca snowy 1.2.4 Apache 2.0 Uncategorized Singular None rackham irma bianca snowy 4.1.2 GNU GPL Uncategorized smoove None rackham irma bianca snowy 0.2.5 Apache 2.0 Uncategorized snp-sites None bianca irma rackham snowy 2.4.0 None Uncategorized snpEff_data snpEff_data rackham bianca miarka snowy 5.1, 5.2 MIT Uncategorized SourceTracker None bianca miarka rackham snowy 0.9.5 None Uncategorized spaceranger-data None bianca irma rackham snowy 1.0.0, 2020-A None Uncategorized spm None bianca irma rackham 12 None Uncategorized SPRKKR None rackham snowy 7.7.1, 7.7.3 GPL license Uncategorized SQuIRE None rackham irma bianca miarka snowy 0.9.9.9a-beta, 885bf4d-20190301 GPL v3 Uncategorized SRPRISM None rackham irma bianca miarka snowy 3.2.7 Public domain Uncategorized SuiteSparse SuiteSparse rackham bianca miarka snowy 5.8.1, 7.7.0 Multiple Uncategorized sumaclust None bianca irma rackham snowy 1.0.00 None Uncategorized sunstudio None None 12.1, 12.1u1, .sunstudio None Uncategorized SvABA None rackham irma bianca snowy 1.1.3-20201112-0f60e36 GPL v3 Uncategorized svviz None rackham irma bianca snowy 1.6.4 MIT Uncategorized swarm None bianca irma rackham snowy 1.2.19 None Uncategorized SymEngine None rackham irma bianca snowy 0.7.0 MIT Uncategorized tapestri None rackham irma bianca snowy 2.0.1 None Uncategorized tcl None bianca irma rackham snowy 8.6.8 None Uncategorized TelomereHunter None bianca irma rackham snowy 1.0.4 None Uncategorized tesseract None rackham irma bianca snowy 4.1.3 Apache-2.0 Uncategorized TEtranscripts None rackham irma bianca snowy 2.2.1 GPL3 Uncategorized totalview None None 8.8.0-2, 8.9.2-0, 8.9.2-2, 8.10.0-1, 8.15.0-15, totalview None Uncategorized turbo None None 6.00, 6.01, 6.1, 6.02, .turbo None Uncategorized turbomole None bianca irma milou rackham 7.3 None Uncategorized umap None bianca irma rackham snowy 0.3.7 None Uncategorized utf8proc None rackham irma bianca snowy 2.5.0 MIT Uncategorized Vampire Vampire rackham snowy 6.0 GPL Uncategorized vasp None None 4.6.34_intel_mkl, 4.6_intel_mkl, 5.2.8_intel_mkl, 5.2.11_intel_mkl, 5.2.12_intel_mkl, 5.2_intel_mkl, 5.3.3_intel_mkl, 5.4.1_intel_mkl, 5.4.1_openmpi_intelmkl, 5.4.4_intel_mkl, .vasp_acml, vasp, vasp.save, vasp.tcl, vasp.tcl_2013, vasp.tcl_2014, vasp.tcl_2017, vasp.tcl_2017_intelmpi, vasp.tcl_2017_openmpi, vasp_new None Uncategorized vasp_acml None None 4.6.28, 4.6.34, 4.6.36, .vasp_acml None Uncategorized vasp_goto None None 4.6.28, 4.6.34, 4.6.36, .vasp_goto None Uncategorized VMD VMD rackham bianca miarka snowy 1.9.4-alpha-57 University of 
Illinois Open Source License Uncategorized VSCodium VSCodium rackham bianca miarka snowy latest MIT Uncategorized vtk None None 6.1.0, .vtk None Uncategorized Whisper Whisper rackham bianca miarka snowy 0.5.1, 20240930 None Uncategorized Whisper-gui Whisper-gui rackham bianca miarka snowy 0.1, 0.2, 0.3, 0.3.1 None Uncategorized Whispercpp Whispercpp rackham bianca miarka snowy 1.7.3 None Uncategorized WhisperX WhisperX rackham bianca miarka snowy 3.1.1 None Uncategorized wine wine rackham bianca miarka snowy 7.0 None Uncategorized WPS None rackham snowy 4.1 None Uncategorized WPS-geog None rackham snowy 4 Custom open-source AS IS Uncategorized WRF None bianca irma rackham snowy 4.1.3, 4.1.3-dmpar None Uncategorized WRFg None rackham snowy 3.8.1 Custom AS-IS Uncategorized XCrySDen None rackham irma bianca snowy 1.5.60, 1.6.2 GNU GPLv2 Uncategorized zig zig rackham bianca miarka snowy 0.9.1, 0.11.0 MIT Uncategorized zsh None bianca irma rackham snowy 5.7.1 None Uncategorized zstd zstd rackham irma bianca miarka snowy 1.5.2 GPL v2"},{"location":"software/spack/","title":"Spack on UPPMAX","text":""},{"location":"software/spack/#introduction","title":"Introduction","text":"

Spack is a simple package management tool, or installer, that also installs a software application's dependencies automatically. Installing a new software version does not break existing installations, so many configurations can coexist on the same system.

It offers a simple spec syntax so that users can specify versions and configuration options concisely. Spack is also simple for package authors: package files are written in pure Python, and specs allow package authors to maintain a single file for many different builds of the same package.

Spack documentation

The UPPMAX staff already has other ways to install most software applications. Please use Spack only if installing your tool in another way is not possible or very difficult, e.g. because it requires very many dependencies and is not available through, e.g., EasyBuild (which the staff can manage centrally). One of the reasons is that Spack produces very many small files, and having two parallel build systems centrally may make things a little complex.

This guide may change with time. Please come back and see updates.

This version assumes that no Spack module is available yet; one may come in the near future. You have your own instance of Spack, but can get configuration files provided by UPPMAX.

"},{"location":"software/spack/#first-steps-installing-your-own-instance-of-spack","title":"First steps: Installing your own instance of SPACK","text":"

You may want to use your project folder if you want your colleagues to be able to run the application. In that case, change directory to a suitable place before installing Spack.

cd <good place>\n
"},{"location":"software/spack/#step-1-clone-spack","title":"Step 1: clone spack","text":"
module load git\ngit clone -c feature.manyFiles=true https://github.com/spack/spack.git \ncd spack\n

To get version v0.18:

git checkout releases/v0.18\n

Next, add Spack to your path. Spack has some nice command-line integration tools, so instead of simply appending to your PATH variable, source the Spack setup script.

source <root dir of spack>/spack/share/spack/setup-env.sh\n

Adding this line to your ~/.bashrc as well will activate the \"spack commands\" each time you start a new terminal session.
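
For example, assuming Spack was cloned to $HOME/spack (a hypothetical location), the line could be added like this:

echo 'source $HOME/spack/share/spack/setup-env.sh' >> ~/.bashrc\n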

"},{"location":"software/spack/#orientation-of-the-spack-files","title":"Orientation of the SPACK files","text":"

The Spack-related files are stored in two places:

  • Spack directory
    • the cloned git repository
    • directories (important in bold)
      • bin spack executables
      • etc configuration files
      • lib libraries
      • share documentation, scripts etc...
      • var other settings
      • opt produced after first installation, contains all packages (tools, dependencies and libraries)
        • tools are found in a tree: ...opt/spack/linux-<arch>/<compiler>/tool/
  • .spack
    • local config and packages files
    • directories (important in bold)
      • bootstrap
      • cache
      • reports
      • linux
        • compilers.yaml
        • packages.yaml

The .yaml files in the .spack/linux directory contain information about which tools you want to include from the UPPMAX system.

  • The compilers.yaml file lists the compiler modules (intel or gcc) available to build your software tool.
  • The packages.yaml file lists tools already available as modules.

By default, these files are empty, but you can copy working \"central\" files that can be extended for your needs. The content of the files can be larger than the needed packages/compilers, i.e. only the packages/dependencies needed for your installation will be \"taken\" from these files and the rest will be ignored. Therefore, the UPPMAX staff may update these central files once in a while.

"},{"location":"software/spack/#get-templates","title":"Get templates","text":"

Do the following to get these templates (be sure not to overwrite old versions of these .yaml files that you configured yourself and might still need).

cp /sw/build/spack/0.17.1/src/spack/share/spack/templates/compilers.yaml ~/.spack/linux/\ncp /sw/build/spack/0.17.1/src/spack/share/spack/templates/packages.yaml ~/.spack/linux/\n
"},{"location":"software/spack/#install-your-program","title":"Install your program","text":"

Check available software applications via Spack:

spack list\nspack list <search string>\n

Check already installed software applications with Spack:

spack find\nspack find <search string>\n

Some installations won't need any compilers or \"large dependencies\". The installation is straightforward:

spack install <tool>\n

Example:

spack install zlib\n

In other cases, for larger application tools that require larger dependencies (which we might already have as modules), check the installation documentation to see what is needed. Is there a recommended compiler? You can also do a \"dry run\" before installing, to see what Spack \"thinks\" it needs to install. Use the spec command:

spack spec -I <tool>\n
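
Example, using zlib (the same example package as above):

spack spec -I zlib\n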

To check which compilers are presently available to Spack, type:

spack compilers\n

If your desired compiler is not there, you can add it by first loading the module and then integrating it into the compilers.yaml file with a Spack command.

Example:

module load intel/20.4\nspack compiler add\n

You can check if the compiler was added, either in the .spack/linux/compilers.yaml file or directly by:

spack compilers\n

To install a tool with a certain compiler version, if several compilers have been added to Spack, use \"%\". For a specific version of the software tool or package, use \"@\".

spack install <tool>%<compiler>@<compiler-version>\n

Example:

spack install zlib%gcc@5.3.0\n

Large application tools may take a couple of hours to build, so it might be good to run the installation in an interactive session (4 cores, -n 4):

spack install -j 4 <tool>\n
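
For example, to first allocate such an interactive session before running the build (a sketch; the project name and wall time are placeholders):

interactive -A <proj> -n 4 -t 04:00:00\n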

To use dependencies already available from our environment module system ('module load'), check the packages.yaml file:

cat .spack/linux/packages.yaml\n

Fill it with text, defining the Spack package names and the corresponding Lmod module names (be careful with indentation), as in the sketch below. Then install your tool, as above.
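
Here is a minimal sketch of such an entry, telling Spack to use an existing zlib module instead of building it (the versions and module name are hypothetical examples; check the Spack documentation for the exact syntax of your Spack version):

packages:\n  zlib:\n    externals:\n    - spec: zlib@1.2.11\n      modules:\n      - zlib/1.2.11\n    buildable: false\n

To install a specific version of a dependency with Spack, use \"^\":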

spack install <tool>%<compiler>@<compiler-version>^<dependency>@<version>\n
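
Example, building hdf5 against a specific zlib (the versions here are hypothetical):

spack install hdf5%gcc@5.3.0^zlib@1.2.11\n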

Here is a summarizing table:

Command Option @ which version % which compiler ^ which dependency"},{"location":"software/spack/#use-your-tool","title":"Use your tool","text":"
$ spack load <tool>  \n# 'module load' of the installed dependencies is not needed here, since their paths are integrated in Spack\n$ <tool> [<arguments>]\n
"},{"location":"software/spack/#develop","title":"Develop","text":"

More to come... Meanwhile:

Developer guide

Developer workflows tutorial

The builds are by default located here: <spack-root>/opt/spack/linux-centos7-broadwell/<compiler-version>/

"},{"location":"software/spack/#packages-and-environments","title":"Packages and environments","text":"

More to come... Meanwhile:

Packaging guide

Environments guide

Environments tutorial

"},{"location":"software/spack/#garbage-collection","title":"Garbage collection","text":"

Installing and uninstalling software will eventually use up your disk space, so it is good practice to do some garbage collection:

spack gc\n
"},{"location":"software/squeue/","title":"squeue","text":"

The job scheduler consists of many programs to manage jobs. squeue is a tool to view information about the job queues.

","tags":["squeue"]},{"location":"software/squeue/#view-all-jobs","title":"View all jobs","text":"","tags":["squeue"]},{"location":"software/squeue/#view-all-jobs-in-the-bianca-or-rackham-queue","title":"View all jobs in the Bianca or Rackham queue","text":"

View all jobs in the Bianca or Rackham queue:

squeue\n
How does that look?

Your output will be similar to this:

[sven@rackham1 ~]$ squeue | head -n 1; squeue | shuf | head\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n          49086999      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49086465      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49085829      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49086067      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49086600      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49087075      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49080199      node /proj/sn torsteng PD       0:00      1 (Priority)\n          49088741      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49086825      core sbatch_l matca755 PD       0:00      1 (Priority)\n          49087385      core sbatch_l matca755 PD       0:00      1 (Priority)\n
","tags":["squeue"]},{"location":"software/squeue/#view-all-jobs-in-snowy-queue","title":"View all jobs in Snowy queue","text":"

View all jobs in the Snowy queue:

squeue -M snowy\n
How does that look?

Your output will be similar to this:

[sven@rackham1 ~]$ squeue -M snowy\nCLUSTER: snowy\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n           9642748      core blast2un qiuzh610 PD       0:00      1 (Nodes required for job are DOWN, DRAINED or reserved for jobs in higher priority partitions)\n           9642749      core blast2un qiuzh610 PD       0:00      1 (Priority)\n           9642750      core blast2un qiuzh610 PD       0:00      1 (Priority)\n           9642751      core blast2un qiuzh610 PD       0:00      1 (Priority)\n           9640955      core interact    teitu  R 1-00:09:18      1 s201\n           9642778      core snakejob yildirim  R       9:18      1 s25\n           9641765      core Ridge_al yildirim  R   17:28:32      1 s201\n           9642747      core blast2un qiuzh610  R      31:48      1 s33\n           6968659      core  bpe_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)\n           6968658      core  bpe_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)\n           6968656      core word_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)\n           6968644      core word_nmt  matsten RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)\n           9642777      node P20608_5    teitu PD       0:00      1 (Resources)\n           9642764      node     flye   octpa7  R    8:14:14      1 s9\n           9641505      node Fed_3_10  koussai  R   21:48:40      1 s73\n           9639430      node hmm_alig   ninaza  R 8-16:57:07      1 s149\n           9642775      node rhd0_st3    ariah  R      31:58      8 s[123-124,126-129,131,133]\n           9642763      node rhd1_st3    ariah  R   13:57:58      8 s[121,139,141,143-145,147-148]\n           9639541   veryfat interact  nikolay PD       0:00      1 (ReqNodeNotAvail, UnavailableNodes:s230)\n           9545835   veryfat     BAND    baldo PD       0:00      1 (AssocMaxCpuMinutesPerJobLimit)\n           9639540   veryfat interact  nikolay  R 7-21:34:31      1 s229\n
","tags":["squeue"]},{"location":"software/squeue/#view-your-jobs-in-the-queue","title":"View your jobs in the queue","text":"","tags":["squeue"]},{"location":"software/squeue/#view-your-jobs-in-the-bianca-or-rackham-queue","title":"View your jobs in the Bianca or Rackham queue","text":"

View your jobs in the Bianca or Rackham queue:

squeue --me\n
How does that look?

Your output will be similar to this, when you have no jobs in the queue:

[sven@rackham1 ~]$ squeue -u $USER\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n

Or alternatively:

squeue -u $USER\n
","tags":["squeue"]},{"location":"software/squeue/#view-your-jobs-in-the-snowy-queue","title":"View your jobs in the Snowy queue","text":"

View your jobs in the Snowy queue:

squeue -M snowy --me\n
How does that look?

Your output will be similar to this, when you have no jobs in the queue:

[sven@rackham1 ~]$ squeue -u $USER -M snowy\nCLUSTER: snowy\n             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)\n
","tags":["squeue"]},{"location":"software/ssh/","title":"ssh","text":"

From Wikipedia:

The Secure Shell Protocol (SSH) is a cryptographic network protocol for operating network services securely over an unsecured network.

At UPPMAX we allow users to login via SSH, using the program ssh.

  • to use graphical applications, use SSH X forwarding, i.e. ssh -X when logging in
  • to login via SSH, see how to create and use an SSH key for the different HPC clusters
","tags":["ssh","SSH"]},{"location":"software/ssh/#ssh-key-management","title":"SSH key management","text":"

For WSL2 under Windows 10 or Windows 11, here is a neat way to get a persistent key manager in WSL2 (credits: original source).

sudo apt-get install keychain\n

Replace XXXX with the output of the hostname command on the command line.

/usr/bin/keychain -q --nogui $HOME/.ssh/id_ed25519_key\nsource $HOME/.keychain/XXXX-sh\n
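
Alternatively, here is a sketch that avoids hard-coding the hostname, by using command substitution (assuming the same key path as above):

/usr/bin/keychain -q --nogui $HOME/.ssh/id_ed25519_key\nsource $HOME/.keychain/$(hostname)-sh\n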

Remove -q to get some information, if you want:

* keychain 2.8.5 ~ http://www.funtoo.org\n* Found existing ssh-agent: 4487\n* Known ssh key: /home/user/.ssh/id_ed25519_key\n

The first time you log in, you will be asked for the password, and the key will be handled by the key manager. Check with:

ssh-add -l\n256 SHA256:wLJvQOM....   ....cTTtiU MyNewKey (ED25519)\n
","tags":["ssh","SSH"]},{"location":"software/ssh/#mobaxterm","title":"MobaXterm","text":"

In MobaXterm you can use the internal MobAgent and/or Pageant from the PuTTY tools.

","tags":["ssh","SSH"]},{"location":"software/ssh/#optional-ssh-config","title":"OPTIONAL: SSH config","text":"

Example $HOME/.ssh/config file to make your work easier.

Host rackham\nUser username\nHostName rackham.uppmax.uu.se\nServerAliveInterval 240\nServerAliveCountMax 2\n\n# Default settings\n#=======================================\nHost *\nForwardAgent no\nForwardX11 yes\nForwardX11Trusted yes\nServerAliveInterval 120\n#=======================================\n

Now, compare usage without and with the config:

# without config\nssh -X username@rackham.uppmax.uu.se\n# with config\nssh rackham\n\n# without config\nscp local_file username@rackham.uppmax.uu.se:remote_folder/\n# with config\nscp local_file rackham:remote_folder/\n\nrsync ...\nsftp ...\n
","tags":["ssh","SSH"]},{"location":"software/ssh/#links","title":"Links","text":"
  • SSH Tips by Pavlin Mitev
","tags":["ssh","SSH"]},{"location":"software/ssh_client/","title":"SSH client","text":"

An SSH client is a program that allows one to use SSH.

","tags":["ssh","SSH","client","clients","ssh client","SSH client","ssh clients","SSH clients"]},{"location":"software/ssh_client/#overview-of-ssh-clients","title":"Overview of SSH clients","text":"Operating system SSH Client Recommended? Allows graphics? [1] Description Linux ssh Yes Yes Start from a terminal MacOS ssh Yes Yes [2] Start from a terminal, needs install for graphics [2] Windows MobaXterm Yes Yes Easiest for Windows users [5] Windows PuTTY Neutral Yes [3] Needs install for graphics [3] Windows ssh Neutral Unknown Start from CMD, later Windows versions [4] Windows ssh Neutral Unknown Start from PowerShell [4]
  • [1] The technical question is 'Allows X forwarding', as this is the way graphical displays are allowed
  • [2] After installing XQuartz
  • [3] After installing Xming
  • [4] Untested
  • [5] MobaXterm has a built-in X server
","tags":["ssh","SSH","client","clients","ssh client","SSH client","ssh clients","SSH clients"]},{"location":"software/ssh_client/#using-ssh-with-different-terminals-that-do-not-allow-for-graphics","title":"Using ssh with different terminals that do not allow for graphics","text":"MacWindows
  • Start terminal (e.g. from Launchpad) or iTerm2 to run ssh
ssh [username]@rackham.uppmax.uu.se\n
  • where [username] is your UPPMAX username, for example ssh sven@rackham.uppmax.uu.se
  • iTerm2 goodies:

    • You can save hosts for later.
    • Drag and drop scp
  • Start a terminal (see below) to run ssh:
$ ssh [username]@rackham.uppmax.uu.se\n
  • where [username] is your UPPMAX username, for example ssh sven@rackham.uppmax.uu.se

  • The ssh (secure shell) client PuTTY

    • You can save hosts for later.
    • No graphics.
  • Windows PowerShell terminal can also work

    • Cannot save hosts
    • no graphics
    • PowerShell
  • Windows command prompt can also work

    • Cannot save hosts
    • no graphics
    • Command Prompt
  • Git bash

","tags":["ssh","SSH","client","clients","ssh client","SSH client","ssh clients","SSH clients"]},{"location":"software/ssh_client/#using-ssh-with-different-terminals-that-allow-for-graphics","title":"Using ssh with different terminals that allow for graphics","text":"MacWindows
  • Download XQuartz or another X11 server for Mac OS from https://www.xquartz.org/
How do I know XQuartz has been installed?

As far as we know, you cannot check this directly: you will have to find out by running an application on Rackham that uses this. See below :-)

  • Start terminal (e.g. from Launchpad) or iTerm2 to run ssh:
$ ssh -X [username]@rackham.uppmax.uu.se\n

where [username] is your UPPMAX username and -X enables X forwarding. For example, if your UPPMAX username is sven, this would be ssh -X sven@rackham.uppmax.uu.se

How do I know XQuartz has been installed?

See SSH X forwarding.

Spoiler: use xeyes

  • Download and install ONE of the X-servers below (to enable graphics)

    • GWSL (recommended because of hardware integration)
    • X-ming
    • VCXSRV
  • or...

  • Install an SSH (secure shell) program with built-in X11 and an sftp file manager

    • MobaXterm
    • sftp frame makes it easy to move, upload and download files.
    • ... though downloading from the remote host to the local computer is usually easier.
    • tabs for several sessions
  • Start a local terminal and an SSH session by:
$ ssh -X [username]@rackham.uppmax.uu.se\n

where [username] is your UPPMAX username and -X enables X forwarding. For example, if your UPPMAX username is sven, this would be ssh -X sven@rackham.uppmax.uu.se

  • Or even better, create and save an SSH session, as shown in the image below.
    • This allows you to use MobaXterm as a file manager and to use the built-in graphical text editor.
    • You can rename the session in the Bookmark settings tab.

","tags":["ssh","SSH","client","clients","ssh client","SSH client","ssh clients","SSH clients"]},{"location":"software/ssh_key_use/","title":"Create and use an SSH key pair","text":"

Here we show how to create and use an SSH key pair for use with our clusters:

  • Create and use an SSH key pair for Bianca
  • Create and use an SSH key pair for Dardel
  • Create and use an SSH key pair for Rackham
","tags":["ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair"]},{"location":"software/ssh_key_use_bianca/","title":"Create an SSH key pair for use with Bianca","text":"

This page describes how to create and use an SSH key for the Bianca cluster.

","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#procedure","title":"Procedure","text":"

This procedure will fail if:

  • You are outside of the university networks, see how to get inside the university networks. This video shows it will fail when being outside of the university networks
  • You use Ubuntu 24.04 Noble, as demonstrated by this video, where a password is still requested after doing this procedure on Rackham

Here is the procedure:

","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#1-create-an-ssh-key-pair","title":"1. Create an SSH key pair","text":"

On your local computer, create an SSH key pair with the following command:

Can I also do this from Rackham?

Yes.

In that case, read 'Rackham' instead of 'local computer'

ssh-keygen -a 100 -t ed25519 -f ~/.ssh/id_ed25519_uppmax_login -C \"My comment\"\n

Here is a description of the flags:

  • -a 100: 100 rounds of key derivations, making your key's password harder to brute-force, as is recommended here
  • -t ed25519: type of encryption scheme
  • -f ~/.ssh/id_ed25519_uppmax_login: specify filename, following the naming scheme as suggested here
  • -C \"My comment\": a comment that will be stored in the key, so you can find out what it was for
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#2-add-the-content-of-your-public-key-to-biancas-authorized-keys","title":"2. Add the content of your public key to Bianca's authorized keys","text":"

Add the content of the public key id_ed25519_uppmax_login.pub on your local computer to Bianca's $HOME/.ssh/authorized_keys.

There are multiple ways to do so.

Can I use ssh-copy-id?

No.

You cannot use ssh-copy-id.

One way is to, on your local computer, view the content of the file:

cat $HOME/.ssh/id_ed25519_uppmax_login.pub\n

Then copy that line to your clipboard.

How does that look?
$ cat $HOME/.ssh/id_ed25519_uppmax_login.pub\nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFGXV8fRK+cazt8qHX+fGS+w6WPOuE82Q19A12345678 Sven's key to UPPMAX\n

On Bianca, to edit the authorized keys file, do:

nano $HOME/.ssh/authorized_keys\n

In nano, paste the line in your clipboard. Save the file and close nano.
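
Alternatively, instead of using nano, you could append the copied line directly on Bianca (a sketch; replace the quoted text with the content of your own public key):

echo \"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFGXV8fRK+cazt8qHX+fGS+w6WPOuE82Q19A12345678 Sven's key to UPPMAX\" >> $HOME/.ssh/authorized_keys\n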

The public key must be one line

The public key you've just copy-pasted must be one line. It must not be wrapped/split over multiple lines.

How can I check?

On Bianca, do:

cat .ssh/authorized_keys \n

You should find your public key there. It looks similar to this:

[sven@sens2017625-bianca ~]$ cat .ssh/authorized_keys \nssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFGXV8fRK+cazt8qHX+fGS+w6WPOuE82Q19A12345678 Sven's key to UPPMAX\n
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#3-set-the-right-permissions","title":"3. Set the right permissions","text":"

On Bianca, do:

chmod 700 .ssh/authorized_keys\nchmod 700 .ssh\nchmod 700 ~\n
How can I check?

You can check by doing the following and observing similar output:

ls -ld .ssh\n

Output should be:

drwx--S--- 2 sven sven 4096 Jan  8 10:26 .ssh\n

Second check:

[sven@sens2017625-bianca ~]$ ls -ld .ssh/authorized_keys \n

Output should be similar to:

-rwx------ 1 sven sven 104 Jan  8 10:26 .ssh/authorized_keys\n

Third check:

ls -l .ssh\n

Output should be similar to:

total 1\n-rw-r----- 1 user user 743 May  7  2019 authorized_keys\n

or

total 1\n-rwx------ 1 sven sven 104 Jan  8 10:26 authorized_keys\n
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#4-log-in-to-bianca-via-the-console-using-an-ssh-key","title":"4. Log in to Bianca via the console using an SSH key","text":"

Log in to Bianca via the console using an SSH key, using ssh -A:

ssh -A [username]-[project]@bianca.uppmax.uu.se\n

For example:

ssh -A sven-sens12345@bianca.uppmax.uu.se\n

You will still get one login prompt, which asks for your UPPMAX password and 2FA.

If all worked, there will be no need to type the UPPMAX password again.

","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#troubleshooting","title":"Troubleshooting","text":"

To debug, run SSH commands with the -vv flag.

How does that look?
...\ndebug1: Requesting authentication agent forwarding.\ndebug2: channel 1: request auth-agent-req@openssh.com confirm 0\n...\n\ndebug1: client_input_channel_open: ctype auth-agent@openssh.com rchan 2 win 65536 max 16384\ndebug1: client_request_agent: bound agent to hostkey\ndebug2: fd 8 setting O_NONBLOCK\ndebug1: channel 2: new [authentication agent connection]\ndebug1: confirm auth-agent@openssh.com\nLast login: Tue Jul 11 18:44:21 2023 from 172.18.144.254\n _   _ ____  ____  __  __    _    __  __\n| | | |  _ \\|  _ \\|  \\/  |  / \\   \\ \\/ /   | System:    sens2017625-bianca\n| | | | |_) | |_) | |\\/| | / _ \\   \\  /    | User:      user\n| |_| |  __/|  __/| |  | |/ ___ \\  /  \\    |\n \\___/|_|   |_|   |_|  |_/_/   \\_\\/_/\\_\\   |\n\n  ###############################################################################\n
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#on-linux-it-still-asks-for-a-password","title":"On Linux, it still asks for a password","text":"

From this post and its answer:

On Bianca, do:

chmod 700 .ssh/authorized_keys \nchmod 700 .ssh\nchmod 700 ~\n

On your local computer, do:

chmod 700 .ssh/authorized_keys \nchmod 700 .ssh\nchmod 700 ~\n
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_bianca/#links","title":"Links","text":"
  • Notes from Pavlin Mitev
","tags":["Bianca","ssh","SSH","ssh key","SSH key","ssh keys","SSH keys","ssh key pair","SSH key pair","create"]},{"location":"software/ssh_key_use_dardel/","title":"Create and use an SSH key pair for Dardel","text":"

This page describes how to create and use an SSH key for the Dardel cluster.

This guide will show you:

  • 1. How to create SSH keys
  • 2. How to add an SSH key to the PDC Login Portal

This makes it possible for you to login to Dardel.

PDC has a more comprehensive guide covering various operating systems, if you want more in-depth instructions.

Warning

  • To be able to transfer from Rackham you have to do the following steps on Rackham.
  • You can also do the steps on your local computer, to be able to log in directly from your terminal and not via Rackham.
","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#1-how-to-create-ssh-keys","title":"1. How to create SSH keys","text":"

To create an SSH key, one needs to

  • start generating the key
  • specify the filename
  • specify the password
","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#11-start-generating-the-key","title":"1.1 Start generating the key","text":"
  • To create an SSH key, run the following command:
ssh-keygen -t ed25519\n

This will start the creation of an SSH key using the ed25519 algorithm.

","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#12-specify-where-to-save-the-file","title":"1.2 Specify where to save the file","text":"

The program will ask you where to save the file:

user@rackham ~ $ ssh-keygen -t ed25519\nGenerating public/private ed25519 key pair.\nEnter file in which to save the key (/home/user/.ssh/id_ed25519):\n

If you press enter, it will save the new key using the suggested name, /home/user/.ssh/id_ed25519.

If it asks you if you want to overwrite, you probably want to press n since you already have one created and might want to use that one instead. If you overwrite it you will lose access to wherever the old key file is used, so just run the ssh-keygen command above again and type in a new name for the file.

/home/user/.ssh/id_ed25519 already exists.\nOverwrite (y/n)?\n
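
For example, to create the key under a distinct name right away (the filename id_ed25519_pdc is just an example):

ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_pdc\n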
","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#13-specify-the-password","title":"1.3 Specify the password","text":"

The next step is to add a password to your key file. This makes sure that even if someone manages to copy your key they will not be able to use it without the password you set here. Type in a password you will remember, press enter, type it in again and press enter.

Enter passphrase (empty for no passphrase):\nEnter same passphrase again:\n

The key will now be created and you can add it to the PDC Login Portal.

How does this look?

This is output similar to what you will see:

Your identification has been saved in /home/user/.ssh/id_ed25519\nYour public key has been saved in /home/user/.ssh/id_ed25519.pub\nThe key fingerprint is:\nSHA256:g+rvY4HoDNlim+Bj43L3pxr56hrlwC4hzPa/yE/2YqE user@rackham\nThe keys randomart image is:\n+--[ED25519 256]--+\n|.o               |\n|o   .            |\n| . = .           |\n|    B ..         |\n| + * B..S        |\n|= + o =          |\n|*+.oo=..         |\n|+=oE+ B          |\n| o +*X o         |\n+----[SHA256]-----+\n
","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#2-how-to-add-an-ssh-key-to-the-pdc-login-portal","title":"2. How to add an SSH key to the PDC Login Portal","text":"

To add an SSH key to the PDC login portal, one needs to:

  • Open the PDC login portal
  • Start adding a new key
  • Actually adding the public key
  • Allow the key to be used from UPPMAX
","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#21-open-the-pdc-login-portal","title":"2.1. Open the PDC login portal","text":"

Go to the PDC Login Portal

How does that look?

That will look like this:

Example PDC login portal without any SSH keys yet. We will need to add an SSH key that allows access from UPPMAX to PDC.

","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#22-start-adding-a-new-key","title":"2.2. Start adding a new key","text":"

Click the Add new key link:

How does adding an SSH key pair look?

That will look like this:

Example of the first step of adding an SSH key pair to the PDC portal. The 'SSH public key' is copy-pasted from cat ~/id_ed25519_pdc.pub on Rackham. The 'Key name' can be chosen freely. Note that this SSH key cannot be used yet for UPPMAX, as it only allows one IP address.

How does it look when the key is added?

That will look like this:

Example PDC login portal with one key. Note that the second column only has one IP address and is still missing *.uppmax.uu.se.

","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#23-actually-adding-the-public-key","title":"2.3. Actually adding the public key","text":"

Here you can either upload the public part of the key file you created before, or you can enter the information manually.

Forgot where the key was?

Here is how to display the SSH public key content at the default location:

cat ~/.ssh/id_ed25519.pub\n

Else, the SSH keys are where you created them in step 1.2 :-)

How does the content of a public SSH key look?

When displaying the content of a public SSH key, it will show text like this:

ssh-ed25519 AAAA69Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se\n

Copy the content of the SSH public key. Paste it into the field SSH public key, make up a name for the key so you know which computer it is on and fill it into the field Key name.

","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_dardel/#24-allow-the-key-to-be-used-from-uppmax","title":"2.4. Allow the key to be used from UPPMAX","text":"

Once you have added your key, you have to allow UPPMAX to use the key. Click on Add address for it and add *.uppmax.uu.se.

Address specifies which IP address(es) are allowed to use this key and the field is prefilled with the IP of the computer you are on at the moment.

How does it look to edit an SSH key so that it can be used for UPPMAX?

That will look like this:

Example of the second step of adding an SSH key pair to the PDC portal. Here the custom address *.uppmax.uu.se is added, so that this SSH key can be used for UPPMAX.

How does it look to have a key that can be used for UPPMAX?

That will look like this:

Example PDC login portal with one key. Note the *.uppmax.uu.se at the bottom of the second column.

","tags":["ssh","SSH","key","ssh key","SSH key","Dardel"]},{"location":"software/ssh_key_use_rackham/","title":"Create and use an SSH key pair for Rackham","text":"

This page describes how to create and use an SSH key for the Rackham cluster.

","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#procedure","title":"Procedure","text":"Prefer a video?
  • Create and use an SSH key pair for Rackham when outside of SUNET (fails!)
  • Create and use an SSH key pair for Rackham on Ubuntu 24.04 Noble (fails!)

This figure shows the procedure:

flowchart TD\n  subgraph ip_inside_sunet[IP inside SUNET]\n    create[1.Create an SSH key pair]\n    add[2.Add your keys to an SSH agent]\n    copy[3.Copy the public key to Rackham]\n  end\n  create --> add\n  add --> copy

This procedure will fail if:

  • You are outside of the university networks, see how to get inside the university networks. This video shows it will fail when being outside of the university networks
  • You use Ubuntu 24.04 Noble, as demonstrated by this video, where a password is still requested after doing this procedure
","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#1-create-an-ssh-key-pair","title":"1. Create an SSH key pair","text":"

Create an SSH key pair with the following command:

ssh-keygen -a 100 -t ed25519 -f ~/.ssh/id_ed25519_uppmax_login -C \"My comment\"\n
  • -a 100: 100 rounds of key derivations, making your key's password harder to brute-force, as is recommended here
  • -t ed25519: type of encryption scheme
  • -f ~/.ssh/id_ed25519_uppmax_login: specify filename, following the naming scheme as suggested here
  • -C \"My comment\": a comment that will be stored in the key, so you can find out what it was for
","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#2-add-your-keys-to-an-ssh-agent","title":"2. Add your keys to an SSH agent","text":"

Add your newly generated ed25519 key to an SSH agent:

ssh-add ~/.ssh/id_ed25519_uppmax_login\n
","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#3-copy-the-public-key-to-rackham","title":"3. Copy the public key to Rackham","text":"

Copy the public key to Rackham or another server.

ssh-copy-id -i .ssh/id_ed25519_uppmax_login.pub [username]@rackham.uppmax.uu.se\n
  • -i .ssh/id_ed25519_uppmax_login.pub: the identity file, the public key's filename
  • [username]@rackham.uppmax.uu.se: your UPPMAX username, for example sven@rackham.uppmax.uu.se

After this, you can login to Rackham without specifying a password.
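
For example, verify this by logging in again; you should not be asked for your UPPMAX password (though the SSH agent may ask for the key's passphrase once):

ssh [username]@rackham.uppmax.uu.se\n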

","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#troubleshooting","title":"Troubleshooting","text":"","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_key_use_rackham/#on-linux-it-still-asks-for-a-password","title":"On Linux, it still asks for a password","text":"

From this post and its answer:

On Rackham, do:

chmod 700 .ssh/authorized_keys \nchmod 700 .ssh\nchmod 700 ~\n

On your local computer, do:

chmod 700 .ssh/authorized_keys \nchmod 700 .ssh\nchmod 700 ~\n
","tags":["ssh","SSH","key","ssh key","SSH key","Rackham"]},{"location":"software/ssh_x_forwarding/","title":"SSH X forwarding","text":"

SSH X forwarding (or simply 'X forwarding') allows one to use graphics when using an SSH client.

For example, this is how UPPMAX user sven would login to Rackham using ssh with X forwarding enabled:

ssh -X sven@rackham.uppmax.uu.se\n

It is the -X that allows ssh to show graphics.

What is X?

In this context, the X window system.

How can I verify I allow X forwarding?

Using xeyes.
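
For example, after logging in with ssh -X, run:

xeyes\n

If a window with a pair of eyes appears, X forwarding works.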

","tags":["ssh","SSH","console","terminal","x-forwarding"]},{"location":"software/ssh_x_forwarding/#uppmax-clusters-that-allow-ssh-with-x-forwarding","title":"UPPMAX clusters that allow SSH with X forwarding","text":"Cluster Allows SSH with X forwarding Bianca No Rackham Yes Snowy Yes","tags":["ssh","SSH","console","terminal","x-forwarding"]},{"location":"software/ssh_x_forwarding/#ssh-clients","title":"SSH clients","text":"

See SSH clients.

","tags":["ssh","SSH","console","terminal","x-forwarding"]},{"location":"software/ssh_x_forwarding/#difference-between-ssh-x-and-ssh-y","title":"Difference between ssh -X and ssh -Y","text":"

Adapted from this AskUbuntu answer:

If you need graphics, ssh -X is more secure. However, it may be too secure for your software to run. In that case, run ssh -Y.

flowchart TD\n  need_graphics[Need graphics?]\n  ssh[Using 'ssh' works]\n  try_ssh_x[Try to use 'ssh -X'. Does it work?]\n  ssh_x[Use 'ssh -X']\n  ssh_y[Use 'ssh -Y']\n\n  need_graphics --> |no| ssh\n  need_graphics --> |yes| try_ssh_x\n  try_ssh_x --> |yes| ssh_x\n  try_ssh_x --> |no| ssh_y

Flowchart to determine to use ssh or ssh -X or ssh -Y.

Using ssh -Y? Let us know!

If you, as a user, use ssh -Y when ssh -X does not work, let us know (see the UPPMAX support page here). It helps us choose which option to show on these documentation websites.

","tags":["ssh","SSH","console","terminal","x-forwarding"]},{"location":"software/tabix/","title":"tabix","text":"

tabix is a tool that indexes tab-delimited genome position files (such as VCF or BED files) and allows quick retrieval of regions from them.

"},{"location":"software/tabix/#finding-tabix","title":"Finding tabix","text":"

To find the versions of tabix installed, use:

module spider tabix\n
How does that look?

The output may look like this:

[sven@rackham1 sven]$ module spider tabix\n\n----------------------------------------------------------------------------\n  tabix: tabix/0.2.6\n----------------------------------------------------------------------------\n\n     Other possible modules matches:\n        tabixpp\n\n    You will need to load all module(s) on any one of the lines below before the\n \"tabix/0.2.6\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n       tabix - use tabix 0.2.6\n\n       Version 0.2.6\n\n\n\n\n----------------------------------------------------------------------------\n  To find other possible module matches execute:\n
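
To then load this version (as the output above indicates, the bioinfo-tools module must be loaded first):

module load bioinfo-tools tabix/0.2.6\n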
"},{"location":"software/tensorflow/","title":"TensorFlow","text":"

TensorFlow is a library for machine learning and artificial intelligence.

TensorFlow is available in multiple variants:

  • TensorFlow as a Python package for CPU: works on Rackham
  • TensorFlow as a Python package for GPU: works on Bianca and Snowy
"},{"location":"software/tensorflow/#tensorflow-as-a-python-package-for-cpu","title":"TensorFlow as a Python package for CPU","text":"

TensorFlow is available as a Python package for CPU, which works on Rackham.

It is part of the python_ML_packages/[version]-cpu modules, where [version] is a version, for example, python_ML_packages/3.11.8-cpu.

How to test TensorFlow as a Python package for CPU?

On Rackham, load the module to get access to the library:

module load python_ML_packages/3.11.8-cpu\n

Start Python:

python\n

In Python, type:

import tensorflow as tf\nprint(tf.test.is_gpu_available())\n

This should print:

False\n

The output is correct: this is the CPU version.

"},{"location":"software/tensorflow/#tensorflow-as-a-python-package-for-gpu","title":"TensorFlow as a Python package for GPU","text":"

TensorFlow is available as a Python package for GPU, which works on Bianca and Snowy.

It is part of the python_ML_packages/[version]-gpu modules, where [version] is a version, for example, python_ML_packages/3.9.5-gpu

You can load this package on nodes without a GPU, but Python will not find TensorFlow!

If you want to work interactively and test things, first allocate resources as seen below:

"},{"location":"software/tensorflow/#on-snowy","title":"On Snowy","text":"
interactive -A <proj> -n 2 -M snowy --gres=gpu:1  -t 1:00:01\n
"},{"location":"software/tensorflow/#on-bianca","title":"On Bianca","text":"
interactive -A <proj> -n 1 -C gpu --gres=gpu:1 -t 01:10:00\n
How to test TensorFlow as a Python package for GPU?

Load the module to get access to the library:

module load python_ML_packages/3.9.5-gpu\n

Start Python:

python\n

In Python, type:

import tensorflow as tf\nprint(tf.test.is_gpu_available())\n

This should print something like:

2024-03-15 14:13:02.038401: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /device:GPU:0 with 13614 MB memory:  -> device: 0, name: Tesla T4, pci bus id: 0000:08:00.0, compute capability: 7.5\nTrue\n

The output is correct: this is the GPU version.

"},{"location":"software/terminal/","title":"Terminal","text":"

A terminal.

A terminal is a program that allows you to run commands.

How to copy-paste to/from a terminal?

This depends on the terminal you use; however, these are the most common options:

Press CTRL + SHIFT + C for copying, CTRL + SHIFT + V for pasting.

What does all the stuff on the line I can type on mean?

The text at the start of the line you can type on is called the command prompt.

What is the command prompt?

The command prompt indicates that the terminal is waiting for user input.

Here is an example prompt:

[sven@rackham2 my_folder]$ \n
  • [ and ]: indicates the beginning and end of information
  • sven: the username
  • @: at which cluster
  • rackham2: the remote node's name, in this case Rackham's second login node
  • my_folder: (part of) the path of the user, in this case, a folder called my_folder. The indication ~ means that the user is in the home folder
  • $: indicates that the terminal is ready for user input

The node's name is useful to find out where you are:

Name Location rackham1 to rackham4 A Rackham login node r1 and higher A Rackham compute node node bianca A Bianca login node b1 and higher A Bianca compute node","tags":["terminal"]},{"location":"software/test/","title":"Test","text":""},{"location":"software/test/#level-2","title":"Level 2","text":""},{"location":"software/test/#level-3","title":"Level 3","text":""},{"location":"software/text_editors/","title":"Text editors","text":"

There are many editors that can be used on the UPPMAX clusters:

Editor type Features Simple terminal editors Used in terminal, easy to learn, limited features Advanced terminal editors Used in terminal, harder to learn, powerful features Simple graphical editors Graphical, needs X forwarding, easy to learn, limited features Advanced graphical editors Graphical, needs X forwarding, harder to learn, powerful features

Try them out and pick a favorite editor!

Tip

These commands are useful in the command line when something is stuck or a program is preventing you from doing further work.

  • ctrl-C interrupts a program or a command that is \"stuck\"
  • ctrl-D quits some programs from the program environment in the terminal
  • ctrl-Z pauses a program, can be continued in background (bg) or foreground (fg)
","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#simple-terminal-editors","title":"Simple terminal editors","text":"
  • nano: used in terminal, easy to learn, limited features
","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#advanced-terminal-editors","title":"Advanced terminal editors","text":"

Warning

  • we suggest that you learn these tools before trying to work with them on UPPMAX
  • If you start one of these editors, you may have difficulties exiting!
  • emacs
  • vim
","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#simple-graphical-editors","title":"Simple graphical editors","text":"

To use a graphical editor you will need to:

  • work on an UPPMAX cluster that allows SSH X forwarding
  • login with SSH X forwarding enabled

See the SSH X forwarding page how to do so.

And what about Bianca?

Bianca is an UPPMAX cluster that does not allow X forwarding.

See the 'How to login to Bianca' page here for more details.

","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#gedit","title":"gedit","text":"

See gedit

","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#advanced-graphical-editors","title":"Advanced graphical editors","text":"","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/text_editors/#gvim","title":"gvim","text":"
  • vim with a GUI, lots of features, very fast
","tags":["text","editor","editors","text editor","text editors"]},{"location":"software/thinlinc/","title":"ThinLinc","text":"

Remote desktop environment for Rackham, using the web browser login.

ThinLinc provides a remote desktop environment for the UPPMAX clusters.

There are two ways of connecting to the clusters using ThinLinc: using a local ThinLinc client, or logging in using a web browser. Here are the differences:

Parameter Local ThinLinc client Web browser login Bianca use Impossible Possible Rackham use Recommended Possible Install ThinLinc client Nothing [1] Simplicity Easy Trivial Performance Higher Lower Recommended for Most use cases Small tasks, when other approach fails
  • [1] You already have a web browser installed :-)

The first is to use the web client and connect from the browser. This can be useful for smaller tasks or if you are unable to install software on the computer you are currently using. Please see below for more information.

The second option is to download the ThinLinc client, which offers higher performance and is recommended for most users. The client can be downloaded from the official download page.

  • ThinLinc on Bianca
  • ThinLinc on Rackham
  • ThinLinc on Snowy: same as ThinLinc on Rackham
"},{"location":"software/thinlinc/#thinlinc-usage","title":"ThinLinc usage","text":""},{"location":"software/thinlinc/#how-do-i-copypaste-within-a-thinlinc-session","title":"How do I copy/paste within a ThinLinc session?\"","text":"
  • Windows/Mac: Right-click and choose, or
  • Windows:
    • paste: shift+insert
    • copy: ctrl+insert
"},{"location":"software/thinlinc/#how-do-i-copypaste-between-thinlinc-and-locally","title":"How do I copy/paste between ThinLinc and locally?","text":"

ThinLinc has a clipboard where one can shuttle text via copy-pasting inside/outside the ThinLinc remote desktop environment.

  • Copy in ThinLinc with the ThinLinc command (see above) and it ends up in the ThinLinc clipboard

    • Mark and copy with Windows/Mac command
    • Paste locally with Windows/Mac command
  • Copy from locally

    • paste in the ThinLinc clipboard with Windows/Mac command
    • paste to ThinLinc place by the ThinLinc command (see above)
"},{"location":"software/thinlinc/#settings","title":"Settings","text":"

Under the \"Screen\" tab, you can set the starting size of the session and choose to enable/disable Full screen mode. Typically, users prefer to turn off full screen mode.

Normally you don't have to change anything else here, and we have also disabled all \"local devices\" (USB sticks, sound and printers) on the server side, so there is no point in fiddling with these specific options.

"},{"location":"software/thinlinc/#thinlinc-error-no-agent-server-available","title":"ThinLinc error: no agent server available","text":"

ThinLinc error: no agent server available

Try again :-)

"},{"location":"software/thinlinc_on_bianca/","title":"ThinLinc on Bianca","text":"

Bianca's remote desktop, using a web browser

ThinLinc provides a remote desktop environment for the UPPMAX clusters. This page describes how to use ThinLinc on Bianca.

For Bianca, there is only one way: accessing Bianca's remote desktop using a website. See the UPPMAX page 'Login to the Bianca remote desktop environment website'.

"},{"location":"software/thinlinc_on_rackham/","title":"ThinLinc on Rackham","text":"

Rackham's remote desktop environment, accessed via a web browser

ThinLinc provides a remote desktop environment for the UPPMAX clusters. This page describes how to use ThinLinc on Rackham.

There are two ways of connecting to the clusters using ThinLinc: using a local ThinLinc client or logging in using a web browser. See ThinLinc for a comparison.

"},{"location":"software/thinlinc_on_rackham/#local-thinlinc-client","title":"Local ThinLinc client","text":"

Rackham's remote desktop environment, accessed via a local ThinLinc client

See the UPPMAX page 'Login to the Rackham remote desktop environment using a local ThinLinc client'.

"},{"location":"software/thinlinc_on_rackham/#web-browser-login","title":"Web browser login","text":"

Rackham's remote desktop environment, accessed via a web browser

See the UPPMAX page 'Login to the Rackham remote desktop environment website'.

"},{"location":"software/tkinter/","title":"Tkinter","text":"

Tkinter is a package built with (every!) Python executable.

"},{"location":"software/tkinter/#use-tkinter","title":"Use Tkinter","text":"

Load a Python module:

module load python/3.12.1\n

Start Python:

python\n

Import tkinter in Python:

import tkinter\n
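
To verify that the import worked, you could, for example, print the Tk version in the same Python session:

print(tkinter.TkVersion)\n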
"},{"location":"software/tkinter/#history","title":"History","text":"

In January 2024, there was a Tkinter UPPMAX ticket and documentation on how to load tkinter.

At that time, doing:

module load python/3.11.4\n

and then in Python:

import turtle\n

results in:

Traceback (most recent call last):\n  File \"<string>\", line 1, in <module>\n  File \"/sw/comp/python3/3.11.4/rackham/lib/python3.11/turtle.py\", line 107, in <module>\n    import tkinter as TK\n  File \"/sw/comp/python3/3.11.4/rackham/lib/python3.11/tkinter/__init__.py\", line 38, in <module>\n    import _tkinter # If this fails your Python may not be configured for Tk\n    ^^^^^^^^^^^^^^^\nModuleNotFoundError: No module named '_tkinter'\n

With the application experts, we found out that Python version 3.11.4 did not have tkinter built in. That Python version was rebuilt. Now all that is needed is to load a Python version and do a regular pip install. That is, the solution above should work.

"},{"location":"software/tkinter/#links","title":"Links","text":"
  • Wikipedia page on Tkinter
"},{"location":"software/tracer/","title":"Tracer","text":"

Tracer is a tool to analyse the results of a BEAST or BEAST2 run.

Tracer is not an UPPMAX module.

Instead, it needs to be downloaded and run:

"},{"location":"software/tracer/#1-download","title":"1. Download","text":"

Pick a Tracer release, such as Tracer v1.7.2, and download the Linux/UNIX version.

How does that look?

Here is how the release page of Tracer v1.7.2 looks:

Tracer

Download the file Tracer_v1.7.2.tgz.

How to download from the command-line?

Use wget on the URL to download from, for example:

wget https://github.com/beast-dev/tracer/releases/download/v1.7.2/Tracer_v1.7.2.tgz\n
"},{"location":"software/tracer/#2-extract","title":"2. Extract","text":"

Extract the downloaded file.

How to do so, using the remote desktop environment?

Right-click the file and click 'Extract here'.

How to do so, using the console environment?

Use tar to extract the file:

tar zxvf  Tracer_v1.7.2.tgz\n
"},{"location":"software/tracer/#3-run","title":"3. Run","text":"

Use java to run the Tracer jar file:

java -jar lib/tracer.jar\n
How does that look?

Here is how Tracer looks in a console environment:

Tracer in a console environment

For this to work, one needs to login using SSH with X forwarding enabled.

Spoiler: use ssh -X

"},{"location":"software/tracer/#links","title":"Links","text":"
  • Tracer GitHub repository
"},{"location":"software/transit_file_transfer_using_filezilla/","title":"File transfer to/from Transit using FileZilla","text":"

There are multiple ways to transfer files to/from Transit using a graphical tool.

Here it is shown how to transfer files using a graphical tool called FileZilla.

What is Transit?

See the page about the UPPMAX Transit server.

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#procedure","title":"Procedure","text":"

FileZilla connected to Transit

Would you like a video?

If you would like to see how to do file transfer from/to Transit using FileZilla, watch the video here.

FileZilla is a secure file transfer tool that works under Linux, Mac and Windows.

To transfer files to/from Transit using FileZilla, do:

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#2-start-filezilla","title":"2. Start FileZilla","text":"","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#3-from-the-menu-select-file-site-manager","title":"3. From the menu, select 'File | Site manager'","text":"Where is that?

It is here:

The FileZilla 'File' menu contains the item 'Site manager'

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#4-click-new-site","title":"4. Click 'New site'","text":"Where is that?

It is here:

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#5-create-a-name-for-the-site","title":"5. Create a name for the site","text":"

Create a name for the site, e.g. Transit.

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#6-setup-the-site","title":"6. Setup the site","text":"

For that site, use all default settings, except:

  • Set protocol to 'SFTP - SSH File Transfer Protocol'
  • Set host to transit.uppmax.uu.se
  • Set user to [username], e.g. sven
What does that look like?

It looks similar to this:

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#7-click-connect","title":"7. Click 'Connect'","text":"","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#8-you-will-be-asked-for-your-password","title":"8. You will be asked for your password","text":"

You will be asked for your password, so type [your password], e.g. VerySecret. You can save the password.

What does that look like?

It looks similar to this:

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#9-transfer-files-between-local-and-transit","title":"9. Transfer files between local and Transit","text":"

Now you can transfer files between your local computer and Transit.

What does that look like?

It looks like this:

","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#where-do-my-files-end-up","title":"Where do my files end up?","text":"

They seem to end up in your Transit home folder.

Its location is at /home/[user_name], for example, at /home/sven.

However, this is not the case: upon closing FileZilla, the files you've uploaded are gone.

You do need to transfer these files to other HPC clusters before closing FileZilla. For detailed instructions, see the guides for the respective cluster, among others:

  • Bianca file transfer using Transit
  • Rackham file transfer using Transit
","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#extra-material","title":"Extra material","text":"","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_filezilla/#winscp","title":"WinSCP","text":"

WinSCP is a secure file transfer tool that works under Windows.

To transfer files to/from Transit using WinSCP, do:

  • Start WinSCP
  • Create a new site
  • For that site, use all default settings, except:
    • Set file protocol to 'SFTP'
    • Set host name to transit.uppmax.uu.se
    • Set user name to [username], e.g. sven
","tags":["transfer","FileZilla","Transit"]},{"location":"software/transit_file_transfer_using_scp/","title":"Data transfer to/from Transit using SCP","text":"

Data transfer to/from Transit using SCP is one of the ways to transfer files to/from Transit.

What is Transit?

Transit is an UPPMAX service to send files around. It is not a file server.

See the page about Transit for more detailed information.

What are the other ways to transfer files from/to Transit?

Other ways to transfer data to/from Transit are described here

One can transfer files to/from Transit using SCP. SCP is an abbreviation of 'Secure copy protocol'; however, it is not considered 'secure' anymore: instead, it is considered an outdated protocol. The program scp allows you to transfer files to/from Transit using SCP, by copying them between your local computer and Transit.

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_scp/#how-to-transfer-files-between-a-local-computer-and-transit","title":"How to transfer files between a local computer and Transit","text":"

The process is:

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_scp/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_scp/#2-start-a-terminal-on-your-local-computer","title":"2. Start a terminal on your local computer","text":"

Start a terminal on your local computer

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_scp/#3a-using-scp-to-download-from-transit","title":"3a. Using scp to download from Transit","text":"

In the terminal, copy files using scp to download files from Transit:

scp [username]@transit.uppmax.uu.se:/home/[username]/[remote_filename] [local_folder]\n

where [remote_filename] is the path to a remote filename, [username] is your UPPMAX username, and [local_folder] is your local folder, for example:

scp sven@transit.uppmax.uu.se:/home/sven/my_remote_file.txt /home/sven\n

If asked, give your UPPMAX password.

You can get rid of this prompt if you have set up SSH keys.
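
To download an entire folder instead, add the -r flag to scp (a sketch, reusing the hypothetical names from above; my_remote_folder is a made-up folder):

scp -r sven@transit.uppmax.uu.se:/home/sven/my_remote_folder /home/sven\n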

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_scp/#3b-using-scp-to-upload-to-transit","title":"3b. Using scp to upload to Transit","text":"

This is how you would copy a file from your local computer to Transit:

scp [local_filename] [username]@transit.uppmax.uu.se:/home/[username]\n

where [local_filename] is the path to a local filename, and [username] is your UPPMAX username, for example:

scp my_file.txt sven@transit.uppmax.uu.se:/home/sven\n

However, Transit is not a file server. The scp command will complete successfully, yet the file will not be found on Transit.

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys.

","tags":["transfer","data transfer","file transfer","scp","SCP","Transit","transit"]},{"location":"software/transit_file_transfer_using_sftp/","title":"Data transfer to/from Transit using SFTP","text":"

Data transfer to/from Transit using SFTP is one of the ways to transfer files to/from Transit.

What is Transit?

See the page about the UPPMAX Transit server.

What are the other ways?

Other ways to transfer data to/from Transit are described here

One can transfer files to/from Transit using SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Transit using SFTP.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#using-sftp","title":"Using SFTP","text":"

The procedure is described in the following steps.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#1-get-inside-sunet","title":"1. Get inside SUNET","text":"

Get inside SUNET.

Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#2-start-a-terminal-on-your-local-computer","title":"2. Start a terminal on your local computer","text":"

Start a terminal on your local computer.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#3-connect-sftp-to-transit","title":"3. Connect sftp to Transit","text":"

In the terminal, connect sftp to Transit by doing:

sftp [username]@transit.uppmax.uu.se\n

where [username] is your UPPMAX username, for example:

sftp sven@transit.uppmax.uu.se\n

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#5-in-sftp-uploaddownload-files-tofrom-transit","title":"5. In sftp, upload/download files to/from Transit","text":"

In sftp, upload/download files to/from Transit.

For example, to upload a file to Transit:

put my_file.txt\n
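
Similarly, to download a file from Transit (a sketch, with my_file.txt as a hypothetical file name):

get my_file.txt\n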

Basic sftp commands can be found here.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#where-do-my-files-end-up","title":"Where do my files end up?","text":"

They seem to end up in your Transit home folder.

Its location is at /home/[user_name], for example, at /home/sven.

However, this is not the case: upon closing sftp, the files you've uploaded are gone.

You do need to transfer these files to other HPC clusters before closing sftp. For detailed instructions, see the guides for the respective cluster, among others:

  • Rackham file transfer using SFTP
","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_sftp/#overview","title":"Overview","text":"
flowchart TD\n\n    %% Give a white background to all nodes, instead of a transparent one\n    classDef node fill:#fff,color:#000,stroke:#000\n\n    %% Graph nodes for files and calculations\n    classDef file_node fill:#fcf,color:#000,stroke:#f0f\n    classDef calculation_node fill:#ccf,color:#000,stroke:#00f\n\n    user(User)\n      user_local_files(Files on user computer):::file_node\n\n    subgraph sub_inside[SUNET]\n      subgraph sub_transit_shared_env[Transit]\n          login_node(login/calculation/interactive node):::calculation_node\n          files_in_transit_home(Files in Transit home folder):::file_node\n      end\n    end\n\n    %% Shared subgraph color scheme\n    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc\n    style sub_inside fill:#fcc,color:#000,stroke:#fcc\n    style sub_transit_shared_env fill:#ffc,color:#000,stroke:#ffc\n\n    user --> |logs in |login_node\n    user --> |uses| user_local_files\n\n    login_node --> |can use|files_in_transit_home\n    %% user_local_files <--> |graphical tool|files_in_transit_home\n    %% user_local_files <--> |SCP|files_in_transit_home\n    user_local_files <==> |SFTP|files_in_transit_home\n\n    %% Aligns nodes prettier\n    user_local_files ~~~ login_node

Overview of file transfer on Transit. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

","tags":["transfer","SFTP","sftp","Transit"]},{"location":"software/transit_file_transfer_using_winscp/","title":"File transfer to/from Transit using WinSCP","text":"

There are multiple ways to transfer files to/from Transit using a graphical tool.

Here it is shown how to transfer files using a graphical tool called WinSCP.

What is Transit?

See the page about the UPPMAX Transit server.

What are the other ways?

Other ways to transfer data to/from Transit are described here

","tags":["transfer","data transfer","file transfer","Transit","transit","WinSCP"]},{"location":"software/transit_file_transfer_using_winscp/#procedure","title":"Procedure","text":"

WinSCP is a secure file transfer tool that works under Windows.

To transfer files to/from Transit using WinSCP, do:

  • Start WinSCP
  • Create a new site
  • For that site, use all default settings, except:
    • Set file protocol to 'SFTP'
    • Set host name to transit.uppmax.uu.se
    • Set user name to [username], e.g. sven
","tags":["transfer","data transfer","file transfer","Transit","transit","WinSCP"]},{"location":"software/uquota/","title":"uquota","text":"

uquota is an UPPMAX tool to determine how much storage space is left in all projects.

See the help file:

uquota --help\n
What does that look like?

Your output will be similar to this:

[sven@rackham1 ~]$ uquota --help\nusage: uquota [-h] [-q] [-d] [-u USER] [-p PROJECTS_FILE] [--include-expired]\n              [--random-usage] [--only-expired] [--sort-by-col SORT_BY_COL]\n              [-s] [-f]\n\noptional arguments:\n  -h, --help            Ask for help\n  -q, --quiet           Quiet, abbreviated output\n  -d, --debug           Include debug output\n  -u USER, --user USER\n  -p PROJECTS_FILE, --projects-file PROJECTS_FILE\n  --include-expired     Include expired projects\n  --random-usage        removed option, don't use\n  --only-expired        Only show expired projects\n  --sort-by-col SORT_BY_COL\n                        Index (0-4) of column to sort by. Default is 0.\n  -s, --slow            Deprecated. Previously ran 'du' command\n  -f, --files           Reports on number of files. Only for home directories\n

Usage:

uquota\n
What does that look like?

Your output will be similar to this:

[sven@rackham3 ~]$ uquota\nYour project     Your File Area       Unit        Usage  Quota Limit  Over Quota\n---------------  -------------------  -------  --------  -----------  ----------\nhome             /home/sven           GiB          24.7           32\nhome             /home/sven           files       79180       300000\nnaiss2024-22-49  /proj/worldpeace     GiB           5.1          128\nnaiss2024-22-49  /proj/worldpeace     files       20276       100000\n

If you find out that your home folder is full, but do not know which folder takes up the most space, use the command below to find it:

du --human --max-depth 1 .\n
What does that look like?

Your output will be similar to this:

[sven@rackham2 ~]$ du --human --max-depth 1 .\n28K ./bin\n52M ./.config\n8.0K ./glob\n1.5G ./users\n484K ./.ssh\n9.7M ./.lmod.d\n514M ./.gradle\n4.0K ./.oracle_jre_usage\n84K ./.pki\n3.2G ./.singularity\n4.0K ./.git-credential-cache\n8.0K ./.keras\n6.1G ./.cache\n344M ./R\n740K ./.local\n8.0K ./.nv\n32M ./.nextflow\n88K ./.r\n140K ./.dbus\n48K ./.subversion\n8.0K ./.gnupg\n480K ./.java\n8.0K ./.vscode-oss\n29M ./.mozilla\n41M ./private\n64K ./.ipython\n8.0K ./.rstudio-desktop\n4.0K ./.allinea\n8.8M ./.beast\n688K ./.gstreamer-0.10\n8.4G ./.apptainer\n4.0K ./my_best_folder\n3.7G ./GitHubs\n260K ./.kde\n24K ./.jupyter\n849M ./.conda\n4.7M ./lib\n176M ./.vscode-server\n16K ./.MathWorks\n8.2M ./.matlab\n25G .\n
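
To list the largest folders last, one can pipe the output through sort (this assumes GNU sort, which provides the --human-numeric-sort flag):

du --human --max-depth 1 . | sort --human-numeric-sort\n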
"},{"location":"software/valgrind/","title":"Valgrind","text":"

There are multiple profilers available on UPPMAX. This page describes Valgrind.

Valgrind is a suite of simulation-based debugging and profiling tools for programs.

Valgrind contains several tools:

  • memcheck, for detecting memory-management problems in your program
  • cachegrind, for cache profiling
  • helgrind, for finding data races in multithreaded programs
  • callgrind, a call graph profiler
  • drd, a thread error detector
  • massif, a heap profiler
  • ptrcheck, a pointer checking tool
  • lackey, a simple profiler and memory tracer

Valgrind works best with the GCC and Intel compilers.

There is a system-wide valgrind 3.15.0 from 2020.

First, load a compiler:

module load gcc\n

or

module load intel\n

then you can use Valgrind like this:

valgrind [options] ./your-program [your programs options]\n
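
For example, a typical memory check with full leak details (a sketch; ./your-program stands for an executable compiled with debugging flags):

valgrind --tool=memcheck --leak-check=full ./your-program\n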
"},{"location":"software/valgrind/#how-to-use-valgrind-with-mpi-programs","title":"How to use valgrind with MPI programs","text":"

Load your compiler, openmpi and the valgrind module as before:

module load gcc/10.3.0 openmpi/3.1.6\n

or

module load intel/20.4 openmpi/3.1.6\n

As of now, Valgrind does not seem to be compatible with openmpi/4.X.X.

Then run:

export LD_PRELOAD=$VALGRIND_MPI_WRAPPER\nmpirun -np 2 valgrind ./your-program\n
"},{"location":"software/vartrix/","title":"VarTrix","text":"

VarTrix is 'a software tool for extracting single cell variant information from 10x Genomics single cell data' (as quoted from the VarTrix repository).

To use VarTrix on an UPPMAX cluster, do

module load bioinfo-tools\n

After this, search for the module of your favorite VarTrix version, using:

module spider vartrix\n
What does that look like?

The output will look similar to:

[sven@rackham3 vartrix]$ module spider vartrix\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n  vartrix: vartrix/1.1.22\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n\n    You will need to load all module(s) on any one of the lines below before the \"vartrix/1.1.22\" module is available to load.\n\n      bioinfo-tools\n\n    Help:\n      vartrix - use vartrix \n\n      Description\n\n      Single-Cell Genotyping Tool\n\n      Version 1.1.22\n\n      https://github.com/10XGenomics/vartrix\n\n      Usage:\n\n          Example:\n\n          vartrix --bam $VARTRIX_TEST/test_dna.bam \\\n                  --cell-barcodes $VARTRIX_TEST/dna_barcodes.tsv \\\n                  --fasta $VARTRIX_TEST/test_dna.fa  \\\n                  --vcf $VARTRIX_TEST/test_dna.vcf\n

Then load your favorite version, for example:

module load vartrix/1.1.22\n
"},{"location":"software/vartrix/#links","title":"Links","text":"
  • vartrix repository
"},{"location":"software/venv_on_rackham/","title":"venv on Rackham","text":"Want to see a video?

You can find the video 'How to use a Python venv on the Rackham UPPMAX cluster' here
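
In short, creating and activating a venv on Rackham looks similar to this (a sketch: python/3.12.1 is the module used elsewhere in this documentation, and ~/my_venv is a hypothetical path):

module load python/3.12.1\npython -m venv ~/my_venv\nsource ~/my_venv/bin/activate\n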

"},{"location":"software/vim/","title":"vim","text":"

UPPMAX has multiple text editors available. This page describes the vim text editor.

vim is an advanced terminal editor that is fast and powerful, once you learn it.

Start vim in a terminal with:

vi\n

Then:

  • Insert mode: type like in a normal text editor. Press i to enter insert mode
  • Command mode: give commands to the editor. Press Escape to enter command mode; see the example session below
  • Cheat sheet: https://coderwall.com/p/adv71w/basic-vim-commands-for-getting-started
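
A minimal example session (notes.txt is a hypothetical file):

vi notes.txt\n# press 'i' to enter insert mode and type your text\n# press Escape, then type ':wq' and Enter to save and quit\n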
"},{"location":"software/vscode/","title":"VSCode","text":"

Visual Studio Code ('VSCode') is an IDE that can be used for software development in many languages.

VSCode from a local computer working on Rackham.

Whether you can use VSCode depends on the HPC cluster:

Cluster | Works/fails | Documentation page
Bianca | Fails [1] | VSCode on Bianca
Rackham | Works | VSCode on Rackham
  • [1] Use VSCodium on Bianca instead
"},{"location":"software/vscode_on_bianca/","title":"Using Visual Studio Code on Bianca","text":"

VSCode fails, use VSCodium instead

The approach below will fail (note that using VSCode on Rackham does work).

Instead, go to the page Using VSCodium on Bianca

","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#introduction","title":"Introduction","text":"

There are multiple IDEs on Bianca, among others VSCodium. Here we discuss that running VSCode on Bianca will fail.

Visual Studio Code ('VSCode') is an IDE that can be used for software development in many languages.

What is an IDE?

See the page on IDEs.

In this session, we show how to use VSCode on Bianca.

","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#procedure-to-start-vscode","title":"Procedure to start VSCode","text":"","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#1-install-vscode-on-your-local-computer","title":"1. Install VSCode on your local computer","text":"","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#2-start-vscode-on-your-local-computer","title":"2. Start VSCode on your local computer","text":"","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#3-in-vscode-install-the-vscode-remote-tunnels-plugin","title":"3. In VSCode, install the VSCode 'Remote Tunnels' plugin","text":"","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_bianca/#4-in-vscode-connect-to-bianca","title":"4. In VSCode, connect to Bianca","text":"

In VSCode, at the 'Remote Explorer' tab, click on 'SSH', then on 'New Remote'.

This is the step that fails

","tags":["VSCode","Bianca"]},{"location":"software/vscode_on_rackham/","title":"Connecting Visual Studio Code to Rackham","text":"

VSCode from a local computer working on Rackham.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#introduction","title":"Introduction","text":"

Visual Studio Code ('VSCode') is an IDE that can be used for software development in many languages.

What is an IDE?

See the page on IDEs.

In this session, we show how to connect VSCode on your local computer to work with your files on Rackham.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#procedure","title":"Procedure","text":"

Below is a step-by-step procedure to start VSCode.

Prefer a video?

See this YouTube video.

An older version of this procedure, where the 'Remote Tunnel' extension is used, can be seen in this YouTube video.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#1-install-vscode-on-your-local-computer","title":"1. Install VSCode on your local computer","text":"

Install VSCode on your local computer.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#2-start-vscode-on-your-local-computer","title":"2. Start VSCode on your local computer","text":"How does that look like?","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#3-in-vscode-install-the-vscode-remote-ssh-plugin","title":"3. In VSCode, install the VSCode 'Remote-SSH' plugin","text":"

In VSCode, install the VSCode 'Remote-SSH' plugin.

What does that look like?

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#4-in-the-remote-explorer-tab-at-ssh-click-the-plus","title":"4. In the 'Remote Explorer' tab, at SSH, click the plus","text":"

In VSCode, go to the 'Remote Explorer' tab. At the SSH section, click on the '+' (with tooltip 'New remote').

What does that look like?

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#5-give-the-ssh-command-to-connect-to-rackham","title":"5. Give the SSH command to connect to Rackham","text":"

In the main edit bar, give the SSH command to connect to Rackham, e.g. ssh sven@rackham.uppmax.uu.se

What does that look like?

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#6-pick-the-a-location-for-the-ssh-config-file","title":"6. Pick the a location for the SSH config file","text":"

In the dropdown menu, pick a location for the SSH config file, e.g. the first, which is similar to /home/sven/.ssh/config.
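
For reference, after this step the SSH config file will contain an entry similar to this (hypothetical values, matching the command from step 5):

Host rackham.uppmax.uu.se\n  HostName rackham.uppmax.uu.se\n  User sven\n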

What does that look like?

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#7-click-connect","title":"7. Click 'Connect'","text":"

In the bottom left of VSCode, click on the popup window 'Connect'.

What does that look like?

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#8-done","title":"8. Done","text":"

You are now connected: there is a new window with VSCode connected to Rackham.

What does that look like?

The window that is connected to a Rackham home folder:

Going to /proj/staff:

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#setting-up-vscode-for-rackham-and-snowy","title":"Setting up VSCode for Rackham and Snowy","text":"

Info

  • You can run VSCode on your local computer and still be able to work with modules loaded or environments created on Rackham.
  • Similarly, it is possible to take advantage of Snowy GPUs while developing on your local computer.
","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#1-connect-your-local-vscode-to-vscode-server-running-on-rackham","title":"1. Connect your local VSCode to VSCode server running on Rackham","text":"

Perform the steps mentioned under the section 'Procedure' above to start VSCode.

When you first establish the ssh connection to Rackham, your VSCode server directory .vscode-server will be created in your home folder /home/[username].

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#2-install-and-manage-extensions-on-remote-vscode-server","title":"2. Install and manage Extensions on remote VSCode server","text":"

By default, all VSCode extensions get installed in your home folder /home/[username]. Due to the small storage quota on the home folder (32 GB, 300k files), it can quickly fill up with extensions and other file operations. The default installation path for VSCode extensions can, however, be changed to your project folder, which has a much larger storage space and file count capacity (1 TB, 1M files).

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#21-manage-extensions","title":"2.1. Manage Extensions","text":"

Go to the Command Palette (Ctrl+Shift+P or F1). Search for Remote-SSH: Settings and then go to Remote.SSH: Server Install Path. Add an Item with the remote host rackham.uppmax.uu.se and, as Value, the folder in which you want to install all your data and extensions, e.g. /proj/uppmax202x-x-xx/nobackup (without a trailing slash /).

If you already had your vscode-server running and storing extensions in your home directory, make sure to kill the server by selecting Remote-SSH: Kill VS Code Server on Host in the Command Palette and deleting the .vscode-server directory in your home folder.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#22-install-extensions","title":"2.2. Install Extensions","text":"

After you are connected to the VSCode server on Rackham, you can sync all your local VSCode extensions to the remote server by searching for Remote: Install Local Extensions in 'SSH: rackham.uppmax.uu.se' in the Command Palette. Alternatively, you can go to the Extensions tab and select each extension individually.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#23-selecting-kernels","title":"2.3. Selecting Kernels","text":"

Request an allocation on either a Rackham or a Snowy compute node, depending on your needs; for that, use the interactive Slurm command. Load the module on Rackham/Snowy that contains the interpreter you want to use in VSCode. For example, in case you need ML packages and a Python interpreter, do module load python_ML_packages. Check the file path of the Python interpreter with which python and copy this path. Go to the Command Palette (Ctrl+Shift+P or F1) in your local VSCode, search for \"interpreter\" (for Python), and paste the path of your interpreter/kernel.
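
For example, finding the interpreter path could look like this (a sketch, using the python_ML_packages module named above):

module load python_ML_packages\nwhich python\n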

venv or conda environments are also visible in VSCode when you select an interpreter/kernel for Python or a Jupyter server. For Jupyter, you need to start the server first; check point 3.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#3-working-with-jupyter-server-on-rackham-and-snowy","title":"3. Working with jupyter server on Rackham and snowy","text":"","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#rackham","title":"Rackham","text":"

Load the Jupyter packages with either module load python or module load python_ML_packages, as per your needs. For heavy compute and a longer-running Jupyter server, allocate a Rackham compute node instead of using the login node: either request a Rackham compute node by using, for example, interactive -A uppmax202x-x-xx -p node -N 1 -t 2:00:00, or move to the next step to run Jupyter on the login node itself. Start the Jupyter server: jupyter notebook --ip 0.0.0.0 --no-browser. Copy the Jupyter server URL, which looks something like http://r52.uppmax.uu.se:8888/tree?token=xxx, click on Select Kernel in VSCode and select Existing Jupyter Server. Paste the URL here and confirm your choice.
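
Put together, the commands above look like this (a sketch, with uppmax202x-x-xx as the placeholder project name):

interactive -A uppmax202x-x-xx -p node -N 1 -t 2:00:00\nmodule load python_ML_packages\njupyter notebook --ip 0.0.0.0 --no-browser\n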

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscode_on_rackham/#snowy","title":"Snowy","text":"

Start an interactive session with a GPU allocation on Snowy: interactive -A uppmax202x-x-xx -p node -N 1 -t 02:00:00 --gres=gpu:1 -M snowy. Load the Jupyter packages with module load python_ML_packages and start the Jupyter server: jupyter notebook --ip 0.0.0.0 --no-browser. This should start a Jupyter server on a Snowy compute node with one T4 GPU. Copy the URL of the running Jupyter server, which looks something like http://s193.uppmax.uu.se:8888/tree?token=xxx, and paste it in the Jupyter kernel path in your local VSCode. The application will automatically perform port forwarding to Rackham, which already listens to Snowy compute nodes over certain ports.

","tags":["VSCode","Rackham","connect"]},{"location":"software/vscodium/","title":"VSCodium","text":"

VSCodium is the community edition of Visual Studio Code and is an IDE that can be used for software development in many languages.

VSCodium running on Bianca

Whether you can use VSCodium depends on the HPC cluster:

Cluster | Works/fails | Documentation page
Bianca | Works | VSCodium on Bianca
Rackham | Fails [1] | VSCodium on Rackham
  • [1] Use VSCode on Rackham instead
","tags":["VSCodium"]},{"location":"software/vscodium_on_bianca/","title":"Using VSCodium on Bianca","text":"

VSCodium running on Bianca

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#introduction","title":"Introduction","text":"

There are multiple IDEs on Bianca, among others VSCodium. Here we discuss how to run VSCodium on Bianca.

VSCodium is the community edition of Visual Studio Code and can be used for software development in many languages.

What is an IDE?

See the page on IDEs.

In this session, we show how to use VSCodium on Bianca, using Bianca's remote desktop environment.

Forgot how to login to a remote desktop environment?

See the 'Logging in to Bianca' page.

As VSCodium is a resource-heavy program, it must be run on an interactive node.

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#procedure-to-start-vscodium","title":"Procedure to start VSCodium","text":"

Below is a step-by-step procedure to start VSCodium. This procedure is also demonstrated in this YouTube video.

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#1-get-within-sunet","title":"1. Get within SUNET","text":"Forgot how to get within SUNET?

See the 'get inside the university networks' page here

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#2-start-the-bianca-remote-desktop-environment","title":"2. Start the Bianca remote desktop environment","text":"Forgot how to start Bianca's remote desktop environment?

See the 'Logging in to Bianca' page.

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#3-start-an-interactive-session","title":"3. Start an interactive session","text":"

Within the Bianca remote desktop environment, start a terminal. Within that terminal, start an interactive session with 1 core.

Forgot how to start an interactive node?

See the 'Starting an interactive node' page.

Spoiler: use:

interactive -A sens2023598 -n 1 -t 8:00:00\n
","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#4-load-the-modules-needed","title":"4. Load the modules needed","text":"

VSCodium needs the VSCodium/latest module.

In the terminal of the interactive session, do:

module load VSCodium/latest\n
","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_bianca/#5-start-vscodium","title":"5. Start VSCodium","text":"

With the modules loaded, in that same terminal, start VSCodium:

code\n

VSCodium starts up quickly.

How does VSCodium look on Bianca?

","tags":["VSCodium","Bianca"]},{"location":"software/vscodium_on_rackham/","title":"Using VSCodium on Rackham","text":"

VSCodium on another cluster, as VSCodium on Rackham fails

VSCodium fails, use VSCode instead

The approach below will fail (note that using VSCodium on Bianca does work).

Instead, go to the page Using VSCode on Rackham

","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#introduction","title":"Introduction","text":"

VSCodium is the community edition of Visual Studio Code and can be used for software development in many languages.

What is an IDE?

See the page on IDEs.

In this session, we show how to use VSCodium on Rackham, using Rackham's remote desktop environment.

Forgot how to login to a remote desktop environment?

See the 'Logging in to Rackham' page.

As VSCodium is a resource-heavy program, it must be run on an interactive node.

","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#procedure-to-start-vscodium","title":"Procedure to start VSCodium","text":"","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#1-start-the-rackham-remote-desktop-environment","title":"1. Start the Rackham remote desktop environment","text":"Forgot how to start Rackham's remote desktop environment?

See the 'Logging in to Rackham' page.

","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#2-start-an-interactive-session","title":"2. Start an interactive session","text":"

Within the Rackham remote desktop environment, start a terminal. Within that terminal, start an interactive session with 1 core.

Forgot how to start an interactive node?

See the 'Starting an interactive node' page.

Spoiler: use:

interactive -A uppmax2023-2-25\n
","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#3-load-the-modules-needed","title":"3. Load the modules needed","text":"

VSCodium needs the VSCodium/latest module.

In the terminal of the interactive session, do:

module load VSCodium/latest\n
","tags":["VSCodium","Rackham"]},{"location":"software/vscodium_on_rackham/#4-start-vscodium","title":"4. Start VSCodium","text":"

With the modules loaded, in that same terminal, start VSCodium:

code\n

VSCodium will give an error.

How does the VSCodium error look on Rackham?

","tags":["VSCodium","Rackham"]},{"location":"software/whisper/","title":"Whisper","text":"","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#introduction","title":"Introduction","text":"

This guide provides instructions for loading and using OpenAI's Whisper, an automatic speech recognition system. Whisper is available on Bianca. It can either be used through a User Interface or loaded as a Module.

AI tool caution

Like all other AI models, Whisper too hallucinates while transcribing or translating, i.e. it may \"make up\" words or even sentences, resulting in misinterpretation or misrepresentation of the speaker.

Quality of transcriptions/ translations and audio formats

Transcriptions (error rate):
  • Swedish: ~10%
  • English: ~5%
  • English with a heavy accent: ~20%

Translations: any language to English gives \"DeepL\"-level performance, slightly better than Google Translate.

Supported file types: mp3, mp4, mpeg, mpga, m4a, wav, webm and wma.

Quality as a factor of the duration of recordings:
  • A few minutes: Excellent
  • A few minutes to an hour: Excellent at the beginning, then deteriorates.
  • An hour or more: Excellent at the beginning, then deteriorates.

Quality as a factor of noise and number of speakers:
  • 2 speakers: Excellent
  • Background noise: Good
  • 2+ speakers: Very Good
  • Conversational overlap: Average. Difficulty disambiguating speakers.
  • Long silences: Good. Might repeat sentences and get stuck in a loop.

Whisper also tries to give separate sentences for different speakers, but this is not guaranteed.

Recordings from Dictaphone

If you record using a dictaphone such as the Olympus DS-9000, it will by default record in the .DS or .DS2 file formats, which are NOT supported by Whisper. Make sure to change the settings on the dictaphone to the .mp3 format before you start recording. Follow this guide to convert your .DS or .DS2 recording to .mp3 using the software that comes with your dictaphone. Alternatively, you can also download the software from here and then follow the same guide.

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#glossary","title":"Glossary","text":"

  • SUPR account: gives access to a project management account for submitting project proposals on SUPR.
  • UPPMAX account: gives access to UPPMAX servers, like Bianca.
  • GUI: Graphical User Interface for taking transcription/translation inputs.
  • WinSCP / FileZilla: user interfaces to send data from your computer to Bianca and vice versa.
  • Terminal: black text-based environment that is used for performing jobs.
  • Wharf: private folder on Bianca that is used to transfer data to and from your computer.
  • Proj: project folder on Bianca that is shared among all project members.
  • Job: a request for transcribing/translating one or many recordings.
  • Slurm: \"job\" handler.

Checklist for new project

  • SUPR account
  • Submit project proposal
  • UPPMAX username and password
  • UPPMAX two factor authentication.

Checklist for existing project

  • SUPR account
  • Submit project proposal
  • UPPMAX username and password
  • UPPMAX two factor authentication.


","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#accessing-your-project","title":"Accessing your project","text":"

The following steps are derived from UPPMAX User Accounts:

  1. Register an account on SUPR.

  2. Apply for a project for sensitive data at Bianca.

  3. Give adequate information while creating your proposal by following this template.

  4. Register an account for UPPMAX at SUPR by clicking the \"Request Account at UPPMAX\" button. You will receive an UPPMAX username and password via email.

  5. Set up two-factor authentication for this newly created UPPMAX account.

  6. Check access to your project on Bianca.

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#whisper-app","title":"Whisper App","text":"","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#step-1-data-transfer-from-local-computer-to-bianca","title":"Step 1: Data transfer from local computer to Bianca","text":"
  1. Transfer your data from your local computer to Wharf using the WinSCP client (for Windows only) or the FileZilla client (Mac, Windows or Linux). Instructions on how to do this are found at their respective links.
","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#step-2-transcribingtranslating","title":"Step 2: Transcribing/Translating","text":"
  1. Log in to Bianca. It requires your UPPMAX username (visible in SUPR), project name and two-factor authentication code. Make sure you are inside SUNET for the link to work.

  2. Click on the Terminal icon at the bottom of the Desktop and enter the following command in it to load the Whisper GUI.

    module load Whisper-gui\n

  3. You should now see the proj and wharf folders on your Desktop, along with a Whisper application icon. wharf contains the data that was transferred in Step 1. (The next time you start transcribing/translating by logging in again to Bianca, you can start from this step and skip the previous one, since the wharf and proj folders are already created.)

  4. Open the wharf and proj folders. Select all the data that you transferred in wharf, and drag and drop it into the proj folder. NOTE: if you drag and drop, it will cut-paste your data instead of copy-paste. Do not keep files in wharf for a long period, as this folder is connected to the outside world and hence is a security risk. proj, on the other hand, is safe to keep data in, as it is cut off from the internet, so move your data there.

  5. Click on the Whisper application on the Desktop. It looks like this:

  6. Select appropriate options, or use the following for the best results:

    • Total audio length in hours: [give a rough average if transcribing files in bulk, rounding up to the nearest hour]
    • Model: large-v2
    • Language used in recordings (leave blank for autodetection): if your language of choice is unavailable, check the \"Languages available\" list for its availability and contact support.
    • Initial Prompt: [leave blank]

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#step-3-monitoring-jobs","title":"Step 3: Monitoring jobs","text":"
  1. Your job will first wait in a queue and then start executing. To check if your job is waiting in the queue, type squeue --me -o \"%.30j\" in the terminal. If you see your job name Whisper_xxx, it means it is in the queue, where xxx is the date and time of job submission, for example: Whisper_2024-10-25_11-10-30.

  2. To check if your job has started executing, locate a file named [Whisper_xxx_yyy].out that will be created in the Whisper_logs folder inside the proj folder, where xxx is the date and time of job submission and yyy is your username followed by a \"job id\", for example: Whisper_2024-10-25_11-10-30_jayan_234.out. This file contains a progress bar for each recording that you sent for transcribing/translating.

  3. If neither the job name Whisper_xxx was found in the queue, nor a [Whisper_xxx_yyy].out was created in Whisper_logs, contact support.

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#step-4-data-transfer-from-project-to-local-computer","title":"Step 4: Data transfer from project to local computer","text":"
  1. Drag and drop your transcriptions/translations from the proj folder to wharf.

  2. Use WinSCP/FileZilla like you did in Step 1 and transfer your data from wharf to your local computer.

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#output-files","title":"Output files","text":"

By default, you receive 5 types of output files for each file you transcribe/translate:
  • With timestamps: .srt, .vtt, .tsv
  • Without timestamps: .txt
  • With detailed model metadata: .json
The most popular ones are the .srt and .txt formats.

On Mac, .srt and .vtt files can be opened in Word by:
  1. Tap with two fingers.
  2. Select Encoding as \"Unicode (UTF-8)\".
  3. Change the name of the file to something like some_name.docx, changing the type of the file to .docx.
  4. Open the file and then Save As a new file.

Advanced settings

Use the features below only if transcriptions/translations are not satisfactory, for example for less commonly spoken languages or languages without good online resources:

  1. When asked for an Initial Prompt, provide a list of comma-separated words or sentences (fewer than 80 words) that describe what the recording is about, or the words used by the speaker in the recording. It should be written in the same language as the one spoken in the recordings.

  2. Try switching to Model: large-v3.

  3. Use a combination of both 1 and 2.

  4. If you are sure about the language used in the recording, use the dropdown menu and select the appropriate language.

Languages available

The following languages are available for transcribing. If your language of choice does not appear in the Whisper application but is listed here, contact support:

en: \"english\", zh: \"chinese\", de: \"german\", es: \"spanish\", ru: \"russian\", ko: \"korean\", fr: \"french\", ja: \"japanese\", pt: \"portuguese\", tr: \"turkish\", pl: \"polish\", ca: \"catalan\", nl: \"dutch\", ar: \"arabic\", sv: \"swedish\", it: \"italian\", id: \"indonesian\", hi: \"hindi\", fi: \"finnish\", vi: \"vietnamese\", he: \"hebrew\", uk: \"ukrainian\", el: \"greek\", ms: \"malay\", cs: \"czech\", ro: \"romanian\", da: \"danish\", hu: \"hungarian\", ta: \"tamil\", no: \"norwegian\", th: \"thai\", ur: \"urdu\", hr: \"croatian\", bg: \"bulgarian\", lt: \"lithuanian\", la: \"latin\", mi: \"maori\", ml: \"malayalam\", cy: \"welsh\", sk: \"slovak\", te: \"telugu\", fa: \"persian\", lv: \"latvian\", bn: \"bengali\", sr: \"serbian\", az: \"azerbaijani\", sl: \"slovenian\", kn: \"kannada\", et: \"estonian\", mk: \"macedonian\", br: \"breton\", eu: \"basque\", is: \"icelandic\", hy: \"armenian\", ne: \"nepali\", mn: \"mongolian\", bs: \"bosnian\", kk: \"kazakh\", sq: \"albanian\", sw: \"swahili\", gl: \"galician\", mr: \"marathi\", pa: \"punjabi\", si: \"sinhala\", km: \"khmer\", sn: \"shona\", yo: \"yoruba\", so: \"somali\", af: \"afrikaans\", oc: \"occitan\", ka: \"georgian\", be: \"belarusian\", tg: \"tajik\", sd: \"sindhi\", gu: \"gujarati\", am: \"amharic\", yi: \"yiddish\", lo: \"lao\", uz: \"uzbek\", fo: \"faroese\", ht: \"haitian creole\", ps: \"pashto\", tk: \"turkmen\", nn: \"nynorsk\", mt: \"maltese\", sa: \"sanskrit\", lb: \"luxembourgish\", my: \"myanmar\", bo: \"tibetan\", tl: \"tagalog\", mg: \"malagasy\", as: \"assamese\", tt: \"tatar\", haw: \"hawaiian\", ln: \"lingala\", ha: \"hausa\", ba: \"bashkir\", jw: \"javanese\", su: \"sundanese\", yue: \"cantonese\"

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#proposal-template","title":"Proposal template","text":"

Under the Basic Information section on NAISS SUPR, provide the following compulsory details pertaining to your project in the following fashion:

  • Project Title : Whisper service for [Name of the project]

  • Abstract: [What is the project about, give links, funding info, duration etc.]

  • Resource Usage: [Explain where transcriptions/translations are needed like interview recordings on device/ zoom or other forms of audio/video recordings from offline/online sources. Give the average and maximum number of recordings to be transcribed/translated. Give the average and maximum size of recordings in mins/hours. Mention if it is a transcribing or translation requirement. Mention the language spoken in the recordings, if known, and a rough estimate of number of recordings for each of these languages. Ignore the \"core-hours\" and \"hours required to analyse one sample\" requirement.]

  • Abridged Data Management Plan: [Address all points. Mention the recording file types example: .mp3, .mp4, .wav etc.]

  • Primary Classification: [Either follow the Standard f\u00f6r svensk indelning av forsknings\u00e4mnen link given or search by entering the field of research such as 'Social Work', 'Human Geography' etc. ]

  • Requested Duration: [Mention the duration for which the Whisper service is strictly required. Requesting a longer duration than actually required might reflect negatively when a new allocation is requested for the same or a new project next time. It is possible to request a shorter duration of 1 month at first and then ask for a new one once the need arises again in the future.]

Module Loading

To load the Whisper module, run the following command:

[jayan@sens2024544-bianca jayan]$ module load Whisper\n

This will also load the necessary dependencies, including python and ffmpeg.

[jayan@sens2024544-bianca jayan]$ module list\nCurrently Loaded Modules:\n1) uppmax   2) python/3.11.4   3) FFmpeg/5.1.2   4) Whisper/20240930\n
","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#command-line","title":"Command-line","text":"

The whisper command can be used to transcribe audio files. For example:

[jayan@sens2024544-bianca jayan]$ whisper audio.flac audio.mp3 audio.wav --model medium\n

For more ways to run whisper, for example on a CPU node or to do translations, check the available flags by doing: whisper --help. You can also check the source code with all arguments here on the official GitHub repository.
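
For example, a translation to English could look like this (a sketch; the --task flag is one of the flags listed by whisper --help):

whisper audio.mp3 --model medium --task translate\n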

","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#python","title":"Python","text":"example.py
import whisper\n\n# Load the model\nmodel = whisper.load_model(\"base\")\n\n# Transcribe an audio file\nresult = model.transcribe(\"/path/to/audiofile.mp3\")\n\n# Output the transcription\nprint(result[\"text\"])\n
","tags":["Whisper","transcriptions","AI"]},{"location":"software/whisper/#available-models","title":"Available Models","text":"

To make offline usage of Whisper more convenient, we provide pre-trained models as part of the Whisper module. You can list all the available models with:

[jayan@sens2024544-bianca jayan]$ ll /sw/apps/Whisper/0.5.1/rackham/models\ntotal 13457440\n-rw-rw-r-- 1 sw  145261783 Nov 10 14:22 base.en.pt\n-rw-rw-r-- 1 sw  145262807 Nov 10 14:23 base.pt\n-rw-rw-r-- 1 sw 3086999982 Nov 10 14:39 large-v1.pt\n-rw-rw-r-- 1 sw 3086999982 Nov 10 14:40 large-v2.pt\n-rw-rw-r-- 1 sw 3087371615 Nov 10 14:27 large-v3.pt\n-rw-rw-r-- 1 sw 1528006491 Nov 10 14:24 medium.en.pt\n-rw-rw-r-- 1 sw 1528008539 Nov 10 14:25 medium.pt\n-rw-rw-r-- 1 sw  483615683 Nov 10 14:23 small.en.pt\n-rw-rw-r-- 1 sw  483617219 Nov 10 14:23 small.pt\n-rw-rw-r-- 1 sw   75571315 Nov 10 14:22 tiny.en.pt\n-rw-rw-r-- 1 sw   75572083 Nov 10 14:22 tiny.pt\n
","tags":["Whisper","transcriptions","AI"]},{"location":"software/winscp/","title":"WinSCP","text":"

WinSCP is a secure file transfer tool that works under Windows.

  • Transfer file to/from Bianca using WinSCP
  • Transfer file to/from Rackham using WinSCP
  • Transfer file to/from Transit using WinSCP
","tags":["winscp","WinSCP","Windows","windows"]},{"location":"software/wrf/","title":"WRF user guide","text":""},{"location":"software/wrf/#introduction","title":"Introduction","text":"
  • The Weather Research and Forecasting (WRF) Model is a next-generation mesoscale numerical weather prediction system designed to serve both operational forecasting and atmospheric research needs.

  • Model home page

  • ARW branch page

  • WRF Preprocessing System (WPS).

  • WRF is installed as modules for version 4.1.3, compiled with Intel, and parallelized for distributed memory (dmpar) or hybrid shared and distributed memory (sm+dm). These are available as:

    • WRF/4.1.3-dmpar default as WRF/4.1.3
    • WRF/4.1.3-dm+sm
    • WPS is installed as version 4.1 and available as:

    • WPS/4.1

  • There are WPS_GEOG data available.

  • Set the path in namelist.wps to:
geog_data_path = '/sw/data/WPS-geog/4/rackham/WPS_GEOG'\n
  • Corine and metria data are included in the WPS_GEOG directory.
  • In /sw/data/WPS-geog/4/rackham you'll find GEOGRID.TBL.ARW.corine_metria, which hopefully works. Copy it to your WPS/GEOGRID directory and then link it to the GEOGRID.TBL file.
  • It may not work for a large domain. If so, either modify the TBL file or use it in inner domains only.

  • To analyse the WRF output on the cluster you can use Vapor, NCL (module called NCL-graphics) or wrf-python (module called wrf-python). For details on how, please consult the web pages below:

    • wrf-python,
    • Vapor or
    • NCL
      • is not updated anymore and the developers recommend GeoCAT which serves as an umbrella over wrf-python, among others.
"},{"location":"software/wrf/#get-started","title":"Get started","text":"
  • This section assumes that you are already familiar with running WRF. If not, please check the tutorial, where you can at least omit the first 5 buttons and go directly to the last button, or, depending on your needs, also check the \u201cStatic geography data\u201d and \u201cReal-time data\u201d.

  • When running WRF/WPS you would like your own settings for the model run, without interfering with other users. Therefore, you need to set up a local or project directory (e.g. 'WRF') and work from there as for a local installation. You also need some of the content from the central installation. Follow these steps:

  • Create a directory where you plan to have your input and result files.

  • Standing in this directory, copy all or some of the following directories from the central installation.

    1. Run directory for real runs

      • cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/run .
      • You can remove *.exe files in this run directory because the module files shall be used.
    2. WPS directory if input data has to be prepared

      • cp -r /sw/EasyBuild/rackham/software/WPS/4.1-intel-2019b-dmpar/WPS-4.1 .
      • You can remove *.exe files in the new directory because the module files shall be used.
    3. Test directory for ideal runs

      • cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/test .
      • You can remove *.exe files because the module files shall be used.
  • When WRF or WPS modules are loaded you can run with \u201cungrib.exe\u201d or for instance \u201cwrf.exe\u201d, i.e. without the \u201c./\u201d.

  • Normally you can run ungrib.exe, geogrid.exe and real.exe and, if the period is not too long, metgrid.exe, on the command line or in interactive mode.
  • wrf.exe has to be run on the compute nodes. Make a batch script, see template below:
#!/bin/bash\n#SBATCH -J <jobname>\n#SBATCH --mail-user <email address>\n#SBATCH --mail-type=ALL\n#SBATCH -t 0-01:00:0\n#set wall time c. 50% higher than expected\n#SBATCH -A <project name>\n#\n#SBATCH -n 40 -p node\n#this gives 40 cores on 2 nodes\nmodule load WRF/4.1.3-dmpar\n# With PMI, jobs on very many nodes start more efficiently.\nexport I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so\nexport I_MPI_PMI2=yes\nsrun -n 40 --mpi=pmi2 wrf.exe\n
"},{"location":"software/wrf/#running-smpardmpar","title":"Running smpar+dmpar","text":"

WRF compiled for hybrid shared + distributed memory (OpenMP+MPI) can be more efficient than dmpar only. With good settings it runs approximately 30% faster and uses correspondingly fewer resources.

To load this module, type:

module load WRF/4.1.3-dm+sm\n

The submit script can look like this:

#!/bin/bash -l\n#SBATCH -J <jobname>\n#SBATCH --mail-user <email address>\n#SBATCH --mail-type=ALL\n#SBATCH -t 0-01:00:0    #set wall time c. 50% higher than expected\n#SBATCH -A <project name>\n#\n#SBATCH -N 2  ## case with 2 nodes = 40 cores on Rackham\n#SBATCH -n 8  ## make sure that n x c = (cores per node) x N\n#SBATCH -c 5\n#SBATCH --exclusive\n# We want to run OpenMP on one unit (the cores that share a memory channel, 10 on Rackham) or a part of it.\n# So, for Rackham, choose -c to be either 10, 5 or 2.\n# c = 5 seems to be the most efficient!\n# Set flags below!\nnt=1\nif [ -n \"$SLURM_CPUS_PER_TASK\" ]; then\n  nt=$SLURM_CPUS_PER_TASK\nfi\nml purge > /dev/null 2>&1 # Clean the environment\nml WRF/4.1.3-dm+sm\nexport OMP_NUM_THREADS=$nt\nexport I_MPI_PIN_DOMAIN=omp\nexport I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so\nexport I_MPI_PMI2=yes\nsrun -n 8 --mpi=pmi2 wrf.exe\n
"},{"location":"software/wrf/#local-installation-with-module-dependencies","title":"Local installation with module dependencies","text":"

If you would like to change the FORTRAN code for physics, or just want the latest version, you can install locally but with the dependencies from the modules.

"},{"location":"software/wrf/#step-1-wrf-source-code-registration-and-download","title":"Step 1: WRF Source Code Registration and Download","text":"
  1. Register and download
  2. Identify the download URLs you need (on GitHub for v4 and higher)

    1. WRF
    2. WPS
    3. Other?
  3. In folder of your choice at UPPMAX:

    1. wget <download url>
  4. tar zxvf <file>
"},{"location":"software/wrf/#step-2-configure-and-compile","title":"Step 2: Configure and compile","text":"
  • Create and set the environment in a SOURCEME file; see the example below for an intel-dmpar build.
  • Loading the WRF module sets most of the environment, but some variables have different names in the configure file.
  • The examples below assume dmpar, but this can be interchanged with dm+sm for a hybrid build.
#!/bin/bash\n\nmodule load WRF/4.1.3-dmpar\n\nmodule list\n\nexport WRF_EM_CORE=1\n\nexport WRFIO_NCD_LARGE_FILE_SUPPORT=1\n\nexport NETCDFPATH=$NETCDF\n\nexport HDF5PATH=$HDF5_DIR\n\nexport HDF5=$HDF5_DIR\n
  • Then
source SOURCEME\n./configure\n
  • Choose intel and dmpar (15) or other, depending on WRF version and parallelization.
  • When finished, it may complain about not finding the netcdf.inc file. This is solved below, as you have to modify the configure.wrf file.

  • Intelmpi settings (for dmpar)

DM_FC           =        mpiifort\n\nDM_CC           =        mpiicc -DMPI2_SUPPORT\n\n#DM_FC           =       mpif90 -f90=$(SFC)\n\n#DM_CC           =       mpicc -cc=$(SCC)\n
  • NetCDF-fortran paths
LIB_EXTERNAL    = add the flags \"-L$(NETCDFFPATH)/lib -lnetcdff -lnetcdf\"  (let the line end with \"\\\")\nINCLUDE_MODULES =    add the flag \"-I$(NETCDFFPATH)/include\" (let the line end with \"\\\")\nAdd the line below close to NETCDFPATH:\nNETCDFFPATH     =    $(NETCDFF)\n

Then:

./compile em_real\n

When you have made modifications to the code, and once configure.wrf is created, just

source SOURCEME\n

and run:

./compile em_real\n
"},{"location":"software/wrf/#running","title":"Running","text":"

Batch script should include:

module load WRF/4.1.3-dmpar\n\nexport I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so\n\nexport I_MPI_PMI2=yes\n\nsrun -n 40 --mpi=pmi2 ./wrf.exe     # Note \"./\", otherwise the module version of wrf.exe is used\n
"},{"location":"software/xeyes/","title":"xeyes","text":"

xeyes is a program that shows two eyes that follow the mouse pointer. The x in its name refers to the X11 window system, which is one of many ways to display graphics on a screen.

xeyes is used mostly diagnostically, i.e. to find out whether one has logged in using SSH with X-forwarding. If xeyes runs but does not show the eyes, SSH with X-forwarding does not work.

","tags":["xeyes","eyes","console","terminal","x-forwarding"]},{"location":"software/xeyes/#how-to-run-xeyes","title":"How to run xeyes","text":"

In a terminal, type:

xeyes\n

If you've logged in via SSH with X-forwarding and it works correctly, you will see this:

If you've logged in without SSH X-forwarding, or the SSH client is not set up correctly, you will see:

The line that indicates the error is:

Error: Can't open display:\n
","tags":["xeyes","eyes","console","terminal","x-forwarding"]},{"location":"software/directly-from-IG/img/","title":"Put related images here","text":""},{"location":"storage/disk_storage_guide/","title":"Disk storage guide","text":""},{"location":"uppmax/gitlab/","title":"UPPMAX GitLab","text":"

For the UPPMAX staff, there is a GitLab page at https://gitlab.uppmax.uu.se/.

","tags":["UPPMAX","GitLab"]}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 000000000..0f8724efd --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 000000000..09568da2d Binary files /dev/null and b/sitemap.xml.gz differ diff --git a/software/allinea-ddt.html b/software/allinea-ddt.html new file mode 100644 index 000000000..8a04c01c1 --- /dev/null +++ b/software/allinea-ddt.html @@ -0,0 +1,66 @@ +

UPPMAX has 96 licenses (one license per (MPI) process) that allow you to debug programs running in parallel on up to 6 nodes of 16 cores, or any other combination. The licenses are shared between all users that are in an active debugging session.

+ +

To use the graphical user interface (GUI), make sure you have X11 forwarding enabled when connecting to Rackham.

+ +
+
+$ ssh -Y rackham.uppmax.uu.se
+
+ +

To use the program, load the ddt module from your command line.

+ +
+
+$ module load ddt
+
+ +

Make sure you have compiled your code with debugging flag options!

+ +

To start the program run:

+ +
+
+$ ddt
+ 
+or
+ 
+$ ddt ./myprogram
+
+ +

Debugging Multithreaded programs:

+ +

Start an interactive job with multiple cores (e.g. "interactive -p core -n 20 -A snicXYZ -t 04:00:00") before starting DDT. In the run window, select the OpenMP box. You can change the number of OpenMP threads directly in the DDT window before running.

+ +

Debugging MPI programs:

+ +

To be able to debug MPI programs, select the MPI option as well as the "Submit to Queue" option, and then click on "Change" to select the submission script configuration for Rackham and provide the job-specific options.

+ +

+ +

System> select "Auto-Detect" for "MPI/UPC Implementation" and tick "Create Root and Workers group automatically".

+ +

+ +

Job Submission> Browse and select "/sw/comp/ddt/7.0/templates/rackham-core.qtf" in the field "Submission template file:". Make sure that "Quick Restart" is also ticked. This will allow you to restart your program without cancelling the allocated time and allocating it again. (There is also a rackham-node.qtf configuration in the same directory as rackham-core.qtf, which allows you to submit to the node and devcore partitions as well.)

+ +

+ +

"Edit Queue Submission Parameters..." to specify Partition, Project and requested time. Failing to provide project number will cause failures in the submission process.

+ +

+ +

On the main configuration window the button "Run" will change to "Submit". Click on this button to submit your debugging session to the queue manager.

+ +

If you enable "Memory debugging", click the "Details" button and tick "Preload the memory debugging library" and select "C/Fortran threads" in the "Language:" field. Read the manual for more detail on the other options in this panel.

+ +

+ +

+ +

Useful links

+ +

Allinea DDT home page

+ +

Support, known issues and release history

+ +

User Guide

diff --git a/software/allinea-ddt/index.html b/software/allinea-ddt/index.html new file mode 100644 index 000000000..309f26a4b --- /dev/null +++ b/software/allinea-ddt/index.html @@ -0,0 +1,3280 @@ + + + + + + + + + + + + + + + + + + + + + + + Allinea DDT - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Allinea DDT

+

UPPMAX has many debuggers installed. +This page describes Allinea DDT ('Distributed Debugging Tool').

+

UPPMAX has 96 licenses (one license per (MPI) process) +that allow you to debug programs running in parallel +on up to 6 nodes of 16 cores, or any other combination. +The licenses are shared between all users that are in an active debugging session.

+

To use the graphical user interface, +use SSH with X forwarding +or ThinLinc.

+

To use the program, load the ddt module +from your command line:

+
module load ddt
+
+

To start the program run:

+
ddt
+
+

or

+
ddt ./myprogram
+
+

ddt can only do debugging if you have compiled your code with debugging flags.

+
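
For example, a minimal sketch of compiling with debug flags using GCC (file names are illustrative; the mpicc line assumes an MPI compiler wrapper is available):

+
gcc -g -O0 my_program.c -o my_program            # -g adds debug symbols, -O0 disables optimization
mpicc -g -O0 my_mpi_program.c -o my_mpi_program  # same idea for MPI code
+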

Debugging Multithreaded programs

+

Start an interactive job with multiple +cores (e.g. interactive -p core -n 20 -A snicXYZ -t 04:00:00) +before starting DDT. +In the run window, select the OpenMP box. +You can change the number of OpenMP threads directly in the DDT window before running.

+

Debugging MPI programs

+

To be able to debug MPI programs, select the +MPI option as well as the 'Submit to Queue' option, +and then click on 'Change' to select the submission script configuration +for Rackham and provide the job-specific options:

+
    +
  • 'System | MPI/UPC Implementation | check Auto-Detect'
  • +
  • 'System | MPI/UPC Implementation | tick Create Root and Workers group automatically'
  • +
  • Select a template file depending on the partition you want to use:
      +
    • core: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-core.qtf
    • +
    • node: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-node.qtf
    • +
    • devcore: 'Job Submission | Submission template file | Browse and select /sw/comp/ddt/7.0/templates/rackham-node.qtf
    • +
    +
  • +
  • 'Job Submission | tick Quick Restart': + allows you to restart your program without cancelling + the allocated time and allocating it again.
  • +
  • Edit Queue Submission Parameters to specify Partition, Project and requested time. + Failing to provide a project number will cause failures in the submission process.
  • +
+

On the main configuration window the button "Run" will change to "Submit". +Click on this button to submit your debugging session to the queue manager.

+

If you enable "Memory debugging", +click the "Details" button and tick 'Preload the memory debugging library' +and select "C/Fortran threads" in the "Language:" field. +Read the manual for more detail on the other options in this panel.

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bash/index.html b/software/bash/index.html new file mode 100644 index 000000000..dd63041aa --- /dev/null +++ b/software/bash/index.html @@ -0,0 +1,3133 @@ + + + + + + + + + + + + + + + + + + + bash - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

bash

+

Bash is the default Unix shell, +a command-line interpreter and script host that provides +a traditional user interface for the Linux operating system at UPPMAX. +Users direct the operation of the computer by entering command input as text +for a command-line interpreter to execute, or by creating +text scripts of one or more such commands.

+

Special bash files

+
    +
  • .bash_profile: is run whenever you login or when you start a login + shell (as in starting a job in the queue).
  • +
  • .bashrc: is run when an interactive shell that is not a login shell + is started, or if it is called from the .bash_profile + (as it is in the default configuration).
  • +
  • .bash_logout: is run when you log out.
  • +
+ + + + + + + + + + + + + +
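
A conventional pattern, implied by the list above, is to keep your settings in .bashrc and have .bash_profile source it, so that login and interactive shells share one configuration. A minimal sketch, only relevant if you customize the default UPPMAX dotfiles:

+
# In ~/.bash_profile: source ~/.bashrc if it exists,
# so login shells (e.g. queued jobs) get the same settings
if [ -f "$HOME/.bashrc" ]; then
    . "$HOME/.bashrc"
fi
+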
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/beast/index.html b/software/beast/index.html new file mode 100644 index 000000000..c5c7e01cd --- /dev/null +++ b/software/beast/index.html @@ -0,0 +1,3205 @@ + + + + + + + + + + + + + + + + + + + + + + + BEAST - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

BEAST

+

BEAST is a tool for Bayesian phylogenetic analysis.

+
+Is BEAST2 a new version of BEAST? +

No.

+

Although BEAST and BEAST2 achieve a similar goal, +BEAST and BEAST2 are developed independently.

+

Hence,

+
    +
  • there are things BEAST can do that BEAST2 cannot, and vice versa
  • +
  • one cannot create a BEAST XML file and expect BEAST2 to be able to run it, and vice versa
  • +
+
+
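
To find which BEAST versions are installed on the UPPMAX clusters, you could query the module system; a sketch, assuming the module is named beast analogously to the beast2 module:

+
module spider beast
+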

Run Tracer

+

Tracer is a tool to analyse the results of a +BEAST (or BEAST2) run.

+

See Tracer for how to use Tracer.

+

Tracer

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/beast2/index.html b/software/beast2/index.html new file mode 100644 index 000000000..6781e6d41 --- /dev/null +++ b/software/beast2/index.html @@ -0,0 +1,3584 @@ + + + + + + + + + + + + + + + + + + + + + + + BEAST2 - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

BEAST2

+

BEAST2 is a tool for Bayesian phylogenetic analysis.

+
+Is BEAST2 a new version of BEAST? +

No.

+

Although BEAST and BEAST2 achieve a similar goal, +BEAST and BEAST2 are developed independently.

+

Hence:

+
    +
  • there are things BEAST can do that BEAST2 cannot, and vice versa
  • +
  • one cannot create a BEAST XML file and expect BEAST2 to be able to run it, and vice versa
  • +
+
+

Using BEAST2

+

Here is how to use BEAST2 on the UPPMAX clusters.

+
+Prefer a video? +

This YouTube video shows +how to use BEAST2 on the UPPMAX clusters.

+
+

1. Load a beast2 module

+

The first step is to load a BEAST2 module.

+

Here is how to find the BEAST2 versions on the UPPMAX clusters:

+
module spider beast2
+
+

When loading a BEAST2 module, also load bioinfo-tools:

+
module load bioinfo-tools beast2/2.7.4
+
+
+How does that look? +
$ module load bioinfo-tools beast2/2.7.4
+beast2/2.7.4: Also loaded beagle/4.0.0
+beast2/2.7.4: Many Beast packages are available, to see the list, 'packagemanager -list'
+beast2/2.7.4: Use BEAST_XMX to specify the amount of RAM (default 5g), 'export BEAST_XMX=15g'. Do not exceed RAM available to your job.
+
+
+
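
As the load message above states, the BEAST_XMX environment variable controls how much RAM BEAST2 may use (default 5g); the value below is illustrative:

+
export BEAST_XMX=15g   # do not exceed the RAM available to your job
+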

2. Run BEAUti

+

The next step is to create a BEAST2 configuration file +using BEAUti. This graphical tool can be started using:

+
beauti
+
+

As BEAUti is a graphical program, +it needs SSH with X forwarding enabled.

+
+How does that look? +

Starting BEAUti results in the following pop-up window:

+

BEAUti

+
+

After using BEAUti, save the file with your BEAST2 model.

+

3. Run

+

A BEAST2 run takes a lot of computing power, +hence do not run it on a login node. +Instead, run it on an interactive node +or use a script.

+
+How to start an interactive node? +

View the UPPMAX documentation +'How to start an interactive node on Rackham'.

+
+

On an interactive node, run BEAST2 on the saved BEAST2 model:

+
beast beast2_setup.xml
+
+

When using a script, put that line in a script. +Below is an example script, called run_beast2.sh:

+
run_beast2.sh
#!/bin/bash
+#SBATCH -A uppmax2023-2-25
+module load bioinfo-tools beast2/2.7.4
+beast beast2_setup.xml
+
+
    +
  • In line 2, replace uppmax2023-2-25 with your UPPMAX project.
  • +
  • In line 3, you may want to replace beast2/2.7.4 with your favorite BEAST2 version
  • +
+

Then run this script using sbatch run_beast2.sh.

+

Note that this is a minimal script. +See the UPPMAX documentation on Slurm +for ways to improve this script.

+
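
For instance, a slightly extended sketch of such a script with an explicit partition, core count and wall time (all values are illustrative, and the -threads flag should be checked against beast -help):

+
#!/bin/bash
#SBATCH -A uppmax2023-2-25   # replace with your UPPMAX project
#SBATCH -p core
#SBATCH -n 4                 # number of cores
#SBATCH -t 1-00:00:00        # wall time
module load bioinfo-tools beast2/2.7.4
beast -threads 4 beast2_setup.xml
+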

View the trees using DensiTree

+

DensiTree is a tool that allows one to display the posterior tree distribution +of a BEAST2 run.

+

Run:

+
densitree [trees_filename]
+
+

where [trees_filename] is the name of the file containing the posterior trees, +resulting in, for example, densitree my_file.trees.

+

Densitree

+

Run Tracer

+

Tracer is a tool to analyse the results of a +(BEAST or) BEAST2 run.

+

See Tracer for how to use Tracer.

+

Tracer

+

Show info

+
beast -beagle_info
+
+
+How does that look? +

Here the command is run on a Rackham +compute node, using an interactive session.

+

Here an interactive session with 1 node:

+
interactive -A uppmax2023-2-25 -M snowy -N 1 -n 16 --exclusive -t 1-00:00:00
+
+
[sven@s93 ~]$ beast -beagle_info
+
+                        BEAST v2.7.4, 2002-2023
+             Bayesian Evolutionary Analysis Sampling Trees
+                       Designed and developed by
+ Remco Bouckaert, Alexei J. Drummond, Andrew Rambaut & Marc A. Suchard
+
+                   Centre for Computational Evolution
+                         University of Auckland
+                       r.bouckaert@auckland.ac.nz
+                        alexei@cs.auckland.ac.nz
+
+                   Institute of Evolutionary Biology
+                        University of Edinburgh
+                           a.rambaut@ed.ac.uk
+
+                    David Geffen School of Medicine
+                 University of California, Los Angeles
+                           msuchard@ucla.edu
+
+                      Downloads, Help & Resources:
+                           http://beast2.org/
+
+  Source code distributed under the GNU Lesser General Public License:
+                   http://github.com/CompEvol/beast2
+
+                           BEAST developers:
+   Alex Alekseyenko, Trevor Bedford, Erik Bloomquist, Joseph Heled,
+ Sebastian Hoehna, Denise Kuehnert, Philippe Lemey, Wai Lok Sibon Li,
+Gerton Lunter, Sidney Markowitz, Vladimir Minin, Michael Defoin Platel,
+          Oliver Pybus, Tim Vaughan, Chieh-Hsi Wu, Walter Xie
+
+                               Thanks to:
+          Roald Forsberg, Beth Shapiro and Korbinian Strimmer
+
+
+--- BEAGLE RESOURCES ---
+
+0 : CPU (x86_64)
+    Flags: PRECISION_SINGLE PRECISION_DOUBLE COMPUTATION_SYNCH EIGEN_REAL EIGEN_COMPLEX SCALING_MANUAL SCALING_AUTO SCALING_ALWAYS SCALERS_RAW SCALERS_LOG VECTOR_SSE VECTOR_NONE THREADING_CPP THREADING_NONE PROCESSOR_CPU FRAMEWORK_CPU
+
+

Here an interactive session with 2 nodes:

+
interactive -A uppmax2023-2-25 -M snowy -N 2 -n 32 --exclusive -t 1-00:00:00
+
+
[sven@s106 ~]$ beast -beagle_info
+
+                        BEAST v2.7.4, 2002-2023
+             Bayesian Evolutionary Analysis Sampling Trees
+                       Designed and developed by
+ Remco Bouckaert, Alexei J. Drummond, Andrew Rambaut & Marc A. Suchard
+
+                   Centre for Computational Evolution
+                         University of Auckland
+                       r.bouckaert@auckland.ac.nz
+                        alexei@cs.auckland.ac.nz
+
+                   Institute of Evolutionary Biology
+                        University of Edinburgh
+                           a.rambaut@ed.ac.uk
+
+                    David Geffen School of Medicine
+                 University of California, Los Angeles
+                           msuchard@ucla.edu
+
+                      Downloads, Help & Resources:
+                           http://beast2.org/
+
+  Source code distributed under the GNU Lesser General Public License:
+                   http://github.com/CompEvol/beast2
+
+                           BEAST developers:
+   Alex Alekseyenko, Trevor Bedford, Erik Bloomquist, Joseph Heled,
+ Sebastian Hoehna, Denise Kuehnert, Philippe Lemey, Wai Lok Sibon Li,
+Gerton Lunter, Sidney Markowitz, Vladimir Minin, Michael Defoin Platel,
+          Oliver Pybus, Tim Vaughan, Chieh-Hsi Wu, Walter Xie
+
+                               Thanks to:
+          Roald Forsberg, Beth Shapiro and Korbinian Strimmer
+
+
+--- BEAGLE RESOURCES ---
+
+0 : CPU (x86_64)
+    Flags: PRECISION_SINGLE PRECISION_DOUBLE COMPUTATION_SYNCH EIGEN_REAL EIGEN_COMPLEX SCALING_MANUAL SCALING_AUTO SCALING_ALWAYS SCALERS_RAW SCALERS_LOG VECTOR_SSE VECTOR_NONE THREADING_CPP THREADING_NONE PROCESSOR_CPU FRAMEWORK_CPU
+
+
+

Troubleshooting

+

BEAUti gives BadAlloc

+
    +
  • Platform(s): MacOS
  • +
+

This problem seems to be related to not having a proper X server installed. +In this case, SSH X forwarding works to the extent +that SSH is able to show xeyes, yet fails to show BEAUti. +Also, using the remote desktop via a ThinLinc client fails.

+

A solution may be to use the remote desktop via the web.

+
+How does that look? +

Here is how it looks:

+
[kayakhi@rackham2 ~]$ xeyes
+
+[kayakhi@rackham2 ~]$ module load bioinfo-tools beast2/2.7.4
+
+beast2/2.7.4: Also loaded beagle/4.0.0
+
+beast2/2.7.4: Many Beast packages are available, to see the list, 'packagemanager -list'
+
+beast2/2.7.4: Use BEAST_XMX to specify the amount of RAM (default 5g), 'export BEAST_XMX=15g'. Do not exceed RAM available to your job.
+
+[kayakhi@rackham2 ~]$ beauti
+
+X Error of failed request:  BadAlloc (insufficient resources for operation)
+
+  Major opcode of failed request:  149 (GLX)
+
+  Minor opcode of failed request:  5 (X_GLXMakeCurrent)
+
+  Serial number of failed request:  0
+
+  Current serial number in output stream:  32
+
+

Note that this user has enabled SSH X forwarding, +as is proven by calling xeyes without problems.

+
+

Optimize performance

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_filezilla/index.html b/software/bianca_file_transfer_using_filezilla/index.html new file mode 100644 index 000000000..50f598393 --- /dev/null +++ b/software/bianca_file_transfer_using_filezilla/index.html @@ -0,0 +1,3262 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Bianca using FileZilla - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Bianca using FileZilla

+

FileZilla connected to Bianca

+
+

FileZilla connected to Bianca

+
+

There are multiple ways to transfer data to/from Bianca.

+

Here, we show how to transfer files using a graphical tool called FileZilla.

+

Procedure

+
+Would you like a video? +

If you like to see how to do file transfer from/to Bianca +using FileZilla, watch the video +here

+
+

To transfer files to/from Bianca using FileZilla, do the following steps:

+

1. Get inside SUNET

+

Get inside SUNET.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start FileZilla

+

Start FileZilla.

+

3. Select 'File | Site manager'

+

In FileZilla, from the menu, select 'File | Site manager'

+
+Where is that? +

It is here:

+

The FileZilla 'File' menu contains the item 'Site manager'

+
+

The FileZilla 'File' menu contains the item 'Site manager'

+
+
+

4. Click 'New site'

+

In the 'Site Manager' dialog, click 'New site'

+
+Where is that? +

It is here:

+

'New site' can be found at the bottom-left

+
+

'New site' can be found at the bottom-left

+
+
+

5. Create a name for the site, e.g. bianca-sens123456

+

In the 'New Site' dialog, create a name for the site, e.g. bianca-sens123456.

+

6. Configure site

+

In the 'New Site' dialog, use all standards, except:

+
    +
  • Set protocol to 'SFTP - SSH File Transfer Protocol'
  • +
  • Set host to bianca-sftp.uppmax.uu.se
  • +
  • Set user to [username]-[project], e.g. sven-sens123456
  • +
+
+How does that look? +

It looks similar to these:

+

FileZilla settings for a user

+

FileZilla settings for another user

+
+
+

Storing a password is useless

+

Because Bianca holds sensitive data, +there is a need to use the UPPMAX two-factor authentication +code every time you log in. +Due to this, storing a password is useless.

+
+

7. Click 'Connect'

+

In FileZilla, click 'Connect'

+

You will be asked for your password with two-factor identification, hence +type [your password][2FA code], e.g. VerySecret123456.

+

Now you can transfer files between your local computer and your wharf folder.

+

NOTE: FileZilla will ask for your password and two-factor code for each file you transfer. To avoid that, go to +Site Manager > Transfer Settings > Limit number of simultaneous connections to 1.

+
+How does that look? +

It looks like this:

+

FileZilla is ready to transfer files

+
+

FileZilla is ready to transfer files

+
+
+

Troubleshooting

+

Access denied

+

Full error, in the FileZilla terminal:

+
Status: Connecting to bianca-sftp.uppmax.uu.se...
+
+Status: Using username "sven-sens2023613".
+
+Status: Access denied
+
+Error: Authentication failed.
+
+Error: Critical error: Could not connect to server
+
+

Hypotheses:

+
    +
  • The user is not within SUNET
  • +
+
+How do I know if I am within the university networks? +

If you log in via eduroam, you are within the university networks.

+

When unsure, go to the Bianca remote desktop website at +https://bianca.uppmax.uu.se: +if this page does not load, you are outside of the university networks.

+

See How to get inside of the university networks +if you are outside of the university networks.

+
+
    +
  • The account is not active
  • +
+
+How do I know if the Bianca project is active? +

A quick way to confirm your Bianca project is active: +go to https://bianca.uppmax.uu.se +and type your username. If the project is displayed, it is active.

+

To confirm your project is active or inactive, use the SUPR NAISS website. +See the UPPMAX documentation on projects +for how to see if your project is active.

+
+
    +
  • The user is not a member of the Bianca project
  • +
+
+How do I know if I am a member of the Bianca project? +

A quick way to confirm you are a member of the Bianca project: +go to https://bianca.uppmax.uu.se +and type your username. If the project is displayed, +you are a member of the Bianca project.

+

To confirm your membership, use the SUPR NAISS website. +See the UPPMAX documentation on projects +for how to see which projects you are a member of.

+
+

See the UPPMAX page on contacting support +on how to contact us.

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_gui/index.html b/software/bianca_file_transfer_using_gui/index.html new file mode 100644 index 000000000..8b6eda460 --- /dev/null +++ b/software/bianca_file_transfer_using_gui/index.html @@ -0,0 +1,3296 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Bianca using a graphical tool - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Bianca using a graphical tool

+

FileZilla connected to Bianca

+
+

FileZilla connected to Bianca

+
+

Overview

+

As users, we need to transfer files +between our local computer and Bianca. +The many ways to transfer files to/from Bianca +are discussed here. +On this page, we learn how to transfer files +to/from Bianca using a graphical tool/program.

+

There are constraints on which programs +we can use, due to Bianca being an HPC cluster +for sensitive data. +Details are described in 'Bianca's constraints' below; +these graphical tools are known to work:

+ + + + + + + + + + + + + + + + + +
Tool       Description
FileZilla  All operating systems
WinSCP     Windows-only
+

When using such a graphical tool, +one needs to be inside of SUNET.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

When a tool is set up, one can only transfer files +between your local computer and your Bianca wharf folder.

+

Bianca's constraints

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#faf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#aaf,color:#000,stroke:#00f
+
+    subgraph sub_inside[IP inside SUNET]
+      subgraph sub_bianca_shared_env[Bianca shared network]
+        subgraph sub_bianca_private_env[The project's private virtual project cluster]
+          login_node(login/calculation/interactive node):::calculation_node
+          files_in_wharf(Files in wharf):::file_node
+          files_in_bianca_project(Files in Bianca project folder):::file_node
+        end
+      end
+      user(User)
+      user_local_files(Files on user computer):::file_node
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#fcc,color:#000,stroke:#fcc
+    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc
+    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc
+
+    user --> |logs in |login_node
+    user --> |uses| user_local_files
+
+    %% As of 2023-12-22, using `**text**` for bold face, does not render correctly
+    %% user_local_files <== "`**transfer files**`" ==> files_in_wharf
+    user_local_files <== "transfer files" ==> files_in_wharf
+
+    login_node --> |can use|files_in_bianca_project
+    login_node --> |can use|files_in_wharf
+    files_in_wharf <--> |transfer files| files_in_bianca_project
+
+

Overview of file transfer on Bianca, when using a graphical tool. +The purple nodes are about file transfer, +the blue nodes are about 'doing other things'. +In this session, we will transfer files between +'Files on user computer' and 'Files in wharf' +using a graphical tool, e.g. FileZilla

+
+

Bianca is an HPC cluster for sensitive data. +To protect that sensitive data, +Bianca has no direct internet connection. +This means that files cannot be downloaded directly.

+
+What is an HPC cluster again? +

What an HPC cluster is, is described in general terms here.

+
+

Instead, one needs to learn one of the many ways to do secure file transfer.

+

Here, we show how to transfer files using a graphical tool called FileZilla.

+

In general, one can pick any graphical tools with these constraints:

+
    +
  • the tool must support SFTP
  • +
  • the tool must not store a password
  • +
+

Whatever tool one picks, it must do secure file transfer. +For secure file transfer, Bianca supports the SFTP protocol. +So, for secure file transfer to Bianca, one needs a tool +that supports SFTP.

+
+Use SFTP ... and why users think incorrectly that SCP will work +

Only SFTP will work. SCP will never work.

+

However, some users use tools that support SFTP, +yet that have 'SCP' in the name, for example, 'WinSCP'. +As users hear from colleagues that the tool 'WinSCP' works, +they may incorrectly conclude that SCP will work.

+

SCP will never work. Only SFTP will work.

+
+

Whatever tool one picks, additionally, the tool must not store a password. +Due to security reasons, one needs to connect to Bianca using a password +and a two-factor authentication number (e.g. VerySecret123456). +If a tool stores a password, that password will be valid for only one session.

+

One tool that can be used for file transfer to Bianca +is FileZilla, which is described in detail below. +The extra materials at the bottom of this page contain +other tools.

+

File transfer overview

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fcf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+    classDef transit_node fill:#fff,color:#000,stroke:#fff
+
+    subgraph sub_inside[IP inside SUNET]
+      subgraph sub_bianca_shared_env[Bianca shared network]
+        subgraph sub_bianca_private_env[The project's private virtual project cluster]
+          login_node(login/calculation/interactive node):::calculation_node
+          files_in_wharf(Files in wharf):::file_node
+          files_in_bianca_project(Files in Bianca project folder):::file_node
+        end
+      end
+      user(User)
+      user_local_files(Files on user computer):::file_node
+      files_on_transit(Files posted to Transit):::transit_node
+      files_on_other_clusters(Files on other HPC clusters):::file_node
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#fcc,color:#000,stroke:#fcc
+    style sub_bianca_shared_env fill:#ffc,color:#000,stroke:#ffc
+    style sub_bianca_private_env fill:#cfc,color:#000,stroke:#cfc
+
+    user --> |logs in |login_node
+    user --> |uses| user_local_files
+    user_local_files <--> |transfer files|files_in_wharf
+    user_local_files <--> |transfer files|files_on_transit
+    files_on_transit <--> |transfer files|files_in_wharf
+    files_on_transit <--> |transfer files|files_on_other_clusters
+    login_node --> |can use|files_in_bianca_project
+    login_node --> |can use|files_in_wharf
+    files_in_wharf <--> |transfer files| files_in_bianca_project
+
+

Overview of file transfer on Bianca +The purple nodes are about file transfer, +the blue nodes are about 'doing other things'.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_lftp/index.html b/software/bianca_file_transfer_using_lftp/index.html new file mode 100644 index 000000000..b541df3fa --- /dev/null +++ b/software/bianca_file_transfer_using_lftp/index.html @@ -0,0 +1,3119 @@ + + + + + + + + + + + + + + + + + + + Using lftp with Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using lftp with Bianca

+

lftp is a command-line program +to transfer files to/from Bianca.

+

With the command line SFTP client lftp, +you need to "set net:connection_limit 1". +lftp may also defer the actual connection +until it's really required unless you end your connect URL with a path.

+

When inside of SUNET +(which can be on a local computer or on Rackham) do:

+
lftp sftp://[user_name]-[project_id]@bianca-sftp.uppmax.uu.se/[user_name]-[project_id]/
+
+

where

+ +

For example:

+
lftp sftp://sven-sens2016001@bianca-sftp.uppmax.uu.se/sven-sens2016001/
+
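
Once connected, ordinary lftp commands move files to/from your wharf folder; a sketch (file and folder names are illustrative):

+
put my_local_file.txt        # upload a file to your wharf folder
get my_remote_file.txt       # download a file from your wharf folder
mirror -R my_local_folder    # upload a folder recursively
+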
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_rsync/index.html b/software/bianca_file_transfer_using_rsync/index.html new file mode 100644 index 000000000..0cba3a82a --- /dev/null +++ b/software/bianca_file_transfer_using_rsync/index.html @@ -0,0 +1,3320 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Bianca using rsync - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Bianca using rsync

+

rsync is a tool for file transfer to/from Bianca +that works under Linux, Mac and Windows.

+
+Prefer a video? +

Watch this video +to see the procedure below demonstrated.

+
+

To transfer files to/from Bianca using rsync, do the following steps:

+
flowchart TD
+  local_computer_ourside_sunet[Local computer outside of SUNET]
+  local_computer[Local computer]
+  transit[Transit]
+  bianca[Bianca]
+  local_computer_ourside_sunet --> |1.Get inside SUNET|local_computer
+  local_computer --> |2.login| transit
+  local_computer --> |4.rsync| bianca
+  bianca --> |5.rsync| local_computer
+  transit --> |3.mount| bianca
+

1. Get inside SUNET

+

Get inside SUNET.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Log in to Transit

+

On your local computer, start a terminal and use ssh to login to Transit:

+
ssh [username]@transit.uppmax.uu.se
+
+

where

+
    +
  • [username] is your UPPMAX username
  • +
+

For example:

+
ssh sven@transit.uppmax.uu.se
+
+

When asked for a password, use your UPPMAX password (without 2FA).

+

See Log in to transit for more details +on how to log in to Transit.

+

3. Mount a Bianca project

+

On transit, mount the wharf of your Bianca project:

+
mount_wharf [project_id]
+
+

where

+ +
+What about the [path] argument? +

Well spotted!

+

Indeed, the Transit server gives these arguments:

+
mount_wharf [project_id] [path]
+
+

However, the [path] argument is optional: if not +given, a default will be used.

+

To simplify matters, here we use the default.

+
+

for example:

+
mount_wharf sens2016001
+
+

The password is your normal UPPMAX password directly followed by +the six digits from the UPPMAX 2-factor authentication. +For example, if your password is VerySecret and the second factor code is 123456 +you would type VerySecret123456 as the password in this step.

+

Now a folder called sens2016001 is created.

+

4. Transfer files to Bianca

+

You can transfer files to Bianca by:

+ +

4a. Transfer individual files to Bianca

+

On local computer, do:

+
rsync [my_local_file] [username]@transit.uppmax.uu.se:[project_id]
+
+

where

+
    +
  • [my_local_file] is the path to your local file
  • +
  • [project_id] is the ID of your NAISS project
  • +
  • [username] is your UPPMAX username
  • +
+

for example:

+
rsync my_local_file.txt sven@transit.uppmax.uu.se:sens2016001
+
+

No need to specify the path to the mounted folder, if defaults are used.

+

The files can now be found in your wharf folder.

+

4b. Transfer all files in a folder to Bianca

+

On local computer, do:

+
rsync --recursive my_folder [username]@transit.uppmax.uu.se:[project_id]
+
+

where

+
    +
  • [project_id] is the ID of your NAISS project
  • +
  • [username] is your UPPMAX username
  • +
+

for example:

+
rsync --recursive my_folder sven@transit.uppmax.uu.se:sens2016001
+
+

No need to specify the path to the mounted folder, if defaults are used.

+

The files can now be found in your wharf folder.

+

5. Transfer files from Bianca to you local computer

+
+

Be responsible with sensitive data

+

The command below will copy data from Bianca to your local computer.

+
+

You can transfer files from Bianca to your local computer by:

+ +

5a. Transfer individual files from Bianca to your local computer

+

On your local computer, do:

+
rsync [username]@transit.uppmax.uu.se:[project_id]/[file_in_wharf] .
+
+

where

+
    +
  • [project_id] is the ID of your NAISS project
  • +
  • [username] is your UPPMAX username
  • +
  • [file_in_wharf] is the name of the file in wharf
  • +
  • . means 'in the current folder of my local computer' or 'here'
  • +
+

for example:

+
rsync sven@transit.uppmax.uu.se:sens2016001/my_file_in_wharf.txt .
+
+

To copy the individual files in your wharf to your local computer.

+

5b. Transfer all folders from Bianca to you local computer

+
+

This will copy all folders in your wharf

+

The command below will copy all folders in your wharf folder +to your local computer.

+

This assumes that there is little data in your wharf folder.

+

We assume you follow good wharf hygiene, +i.e. your wharf folder is mostly empty most of the time.

+
+

On your local computer, do:

+
rsync --recursive [username]@transit.uppmax.uu.se:[project_id] .
+
+

where

+
    +
  • [project_id] is the ID of your NAISS project
  • +
  • [username] is your UPPMAX username
  • +
  • . means 'in the current folder of my local computer' or 'here'
  • +
+

for example:

+
rsync --recursive sven@transit.uppmax.uu.se:sens2016001 .
+
+

To copy your wharf folder to your local computer. +The folder created on your local computer will be called [project_id], +for example, sens2016001.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_sftp/index.html b/software/bianca_file_transfer_using_sftp/index.html new file mode 100644 index 000000000..e98b055f1 --- /dev/null +++ b/software/bianca_file_transfer_using_sftp/index.html @@ -0,0 +1,3183 @@ + + + + + + + + + + + + + + + + + + + Using sftp with Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using sftp with Bianca

+

sftp is a command-line program +to transfer files to/from Bianca.

+

Usage

+
+Would you enjoy a video? +

A video showing how to sftp with Bianca can be found here.

+
+

When inside of SUNET +(which can be on a local computer or on Rackham) do:

+
sftp [user_name]-[project_id]@bianca-sftp.uppmax.uu.se:/[user_name]-[project_id]
+
+

where

+ +

For example:

+
sftp sven-sens2016001@bianca-sftp.uppmax.uu.se:/sven-sens2016001
+
+

sftp will ask for a password:

+
sven-sens2016001@bianca-sftp.uppmax.uu.se's password:
+
+

The password is your normal UPPMAX password directly followed by +the six digits from the UPPMAX 2-factor authentication. +For example, if your password is VerySecret and the second factor code is 123456 +you would type VerySecret123456 as the password in this step.

+

After typing in the password and 2FA one sees a welcome message +and the sftp prompt.

+
+How does that look? +

This is the welcome message:

+
Hi!
+
+You are connected to the bianca wharf (sftp service) at
+bianca-sftp.uppmax.uu.se.
+
+Note that we only support SFTP, which is not exactly the
+same as SSH (rsync and scp will not work).
+
+Please see our homepage and the Bianca User Guide
+for more information:
+
+https://www.uppmax.uu.se/support/user-guides/bianca-user-guide/
+
+If you have any questions not covered by the User Guide, you are
+welcome to contact us at support@uppmax.uu.se.
+
+Best regards,
+UPPMAX
+
+sven-sens2016001@bianca-sftp.uppmax.uu.se's password:
+Connected to bianca-sftp.uppmax.uu.se.
+sftp>
+
+
+
+How do I get rid of the welcome message? +

Use sftp's -q (which is short for 'quiet') flag:

+
sftp -q sven-sens2016001@bianca-sftp.uppmax.uu.se
+
+
+

The last line, sftp> is the sftp prompt.

+

Once connected, you will have to type the sftp commands to upload/download files. +See the UPPMAX page on sftp for how to do so.

+
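
For instance, a minimal upload/download session at the sftp prompt could look like this (file names are illustrative):

+
sftp> put my_local_file.txt    # upload to your wharf folder
sftp> get my_remote_file.txt   # download from your wharf folder
sftp> ls                       # list the files in your wharf folder
+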

With sftp you only have access to your wharf folder.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/bianca_file_transfer_using_winscp/index.html b/software/bianca_file_transfer_using_winscp/index.html new file mode 100644 index 000000000..4f3f1882b --- /dev/null +++ b/software/bianca_file_transfer_using_winscp/index.html @@ -0,0 +1,3176 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Bianca using WinSCP - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Bianca using WinSCP

+

Download and install WinSCP

+

WinSCP

+

WinSCP is a secure file transfer tool that works under Windows.

+

To transfer files to/from Bianca using WinSCP, do the following steps:

+

1. Get inside SUNET

+

Get inside SUNET.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start WinSCP

+

Start WinSCP.

+

3. Create a new site

+

In WinSCP, click on 'Create new site'.

+

For that site, use all standards, except:

+
    +
  • Set file protocol to 'SFTP'
  • +
  • Set host name to bianca-sftp.uppmax.uu.se
  • +
  • Set user name to [username]-[project], e.g. sven-sens123456
  • +
  • Do not set password! Provide your UPPMAX password followed immediately by the UPPMAX 2FA when asked by the interactive login.
  • +
+
+How does that look? +

It looks like this:

+

WinSCP

+
+

4. Transfer files

+

Now you can transfer files between your local computer and your wharf folder.

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/cellranger/index.html b/software/cellranger/index.html new file mode 100644 index 000000000..ae62e2c69 --- /dev/null +++ b/software/cellranger/index.html @@ -0,0 +1,3426 @@ + + + + + + + + + + + + + + + + + + + Cell Ranger - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Cell Ranger

+

According to +the Cell Ranger GitHub repository:

+
+

Cell Ranger is a set of analysis pipelines that perform sample +demultiplexing, barcode processing, single cell 3' and 5' gene counting, +V(D)J transcript sequence assembly and annotation, +and Feature Barcode analysis from 10x Genomics Chromium Single Cell data.

+
+

Cell Ranger (the tool) is part of the cellranger +module.

+

Finding the module that +has Cell Ranger installed:

+
module spider cellranger
+
+
+How does that look? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ module spider cellranger
+
+----------------------------------------------------------------------------
+  cellranger:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger/1.1.0
+        cellranger/1.3.0
+        cellranger/2.0.2
+        cellranger/2.2.0
+        cellranger/3.0.1
+        cellranger/4.0.0
+        cellranger/5.0.1
+        cellranger/6.0.2
+        cellranger/6.1.2
+        cellranger/7.0.0
+        cellranger/7.0.1
+        cellranger/7.1.0
+        cellranger/8.0.1
+     Other possible modules matches:
+        cellranger-ARC  cellranger-ARC-data  cellranger-ATAC  cellranger-ATAC-da
+ta  ...
+
+----------------------------------------------------------------------------
+  To find other possible module matches execute:
+
+      $ module -r spider '.*cellranger.*'
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger" package (including how 
+to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger/8.0.1
+----------------------------------------------------------------------------
+
+
+
+How to see the tools similar to cellranger? +

In case you want to search for similar tools, add +a dash at the end of the search term:

+
module spider cellranger-
+
+

Your output will look similar to this:

+
[sven@rackham1 ~]$ module spider cellranger-
+
+----------------------------------------------------------------------------
+  cellranger-ARC:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-ARC/1.0.0
+        cellranger-ARC/2.0.2
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-ARC" package (including 
+how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-ARC/2.0.2
+----------------------------------------------------------------------------
+
+----------------------------------------------------------------------------
+  cellranger-ARC-data:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-ARC-data/2020-A
+        cellranger-ARC-data/2020-A-2.0.0
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-ARC-data" package (inclu
+ding how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-ARC-data/2020-A-2.0.0
+----------------------------------------------------------------------------
+
+----------------------------------------------------------------------------
+  cellranger-ATAC:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-ATAC/1.2.0
+        cellranger-ATAC/2.0.0
+        cellranger-ATAC/2.1.0
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-ATAC" package (including
+ how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-ATAC/2.1.0
+----------------------------------------------------------------------------
+
+----------------------------------------------------------------------------
+  cellranger-ATAC-data:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-ATAC-data/1.2.0
+        cellranger-ATAC-data/2.0.0
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-ATAC-data" package (incl
+uding how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-ATAC-data/2.0.0
+----------------------------------------------------------------------------
+
+----------------------------------------------------------------------------
+  cellranger-DNA: cellranger-DNA/1.1.0
+----------------------------------------------------------------------------
+
+    You will need to load all module(s) on any one of the lines below before the
+ "cellranger-DNA/1.1.0" module is available to load.
+
+      bioinfo-tools
+
+    Help:
+       cellranger-DNA - use cellranger-DNA 1.1.0
+
+
+      The cellranger-DNA-data/1.0.0 module is loaded as a prerequisite.
+
+
+
+
+----------------------------------------------------------------------------
+  cellranger-DNA-data: cellranger-DNA-data/1.0.0
+----------------------------------------------------------------------------
+
+    This module can be loaded directly: module load cellranger-DNA-data/1.0.0
+
+    Help:
+       cellranger-DNA-data - use cellranger-DNA-data 1.0.0
+
+
+      10X Genomics Chromium Cell Ranger DNA data
+      Version 1.0.0
+      https://support.10xgenomics.com/single-cell-dna/software/downloads/latest
+
+      NOTE: This is a data module. The software that uses this data is the cellr
+anger-DNA module, which loads this.
+
+
+      Default data for GRCh38, GRCh38 and GRCm38 references can be found in $CEL
+LRANGER_DNA_DATA.
+      To see the top-level directories:
+
+       ls -l $CELLRANGER_DNA_DATA
+
+      Genome assembly    Subdirectory
+      ---------------    ------------
+      GRCh38             refdata-GRCh38-1.0.0
+      GRCh37             refdata-GRCh37-1.0.0
+      GRCm38             refdata-GRCm38-1.0.0
+
+      Sample Index Set Sequences (both CSV and JSON formats)
+      ------------------------------------------------------
+      Chromium DNA     chromium-shared-sample-indexes-plate.csv
+                       chromium-shared-sample-indexes-plate.json
+
+      For information on how each dataset was produced, see the References secti
+on of
+      https://support.10xgenomics.com/single-cell-dna/software/downloads/latest
+
+
+
+
+----------------------------------------------------------------------------
+  cellranger-VDJ-data:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-VDJ-data/4.0.0
+        cellranger-VDJ-data/5.0.0
+        cellranger-VDJ-data/7.0.0
+        cellranger-VDJ-data/7.1.0
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-VDJ-data" package (inclu
+ding how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-VDJ-data/7.1.0
+----------------------------------------------------------------------------
+
+----------------------------------------------------------------------------
+  cellranger-data:
+----------------------------------------------------------------------------
+     Versions:
+        cellranger-data/1.1.0
+        cellranger-data/1.2.0
+        cellranger-data/3.0.0
+        cellranger-data/2020-A
+        cellranger-data/2024-A
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "cellranger-data" package (including
+ how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider cellranger-data/2024-A
+----------------------------------------------------------------------------
+
+
+

Loading the latest version of the cellranger module:

+
module load bioinfo-tools cellranger/8.0.1
+
+
+How does that look? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ module load bioinfo-tools cellranger/8.0.1
+Default data for several references are available at $CELLRANGER_DATA; see 'module help cellranger-data/2024-A' for more information
+Default data for GRCh38 and GRCm38 immune profiling references are available at $CELLRANGER_VDJ_DATA; see 'module help cellranger-VDJ-data/7.1.0' for more information
+
+
+

Now you can run Cell Ranger:

+
cellranger
+
+
+How does that look? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ cellranger
+cellranger cellranger-8.0.1
+
+Process 10x Genomics Gene Expression, Feature Barcode, and Immune Profiling data
+
+Usage: cellranger <COMMAND>
+
+Commands:
+  count           Count gene expression and/or feature barcode reads from a
+                      single sample and GEM well
+  multi           Analyze multiplexed data or combined gene
+                      expression/immune profiling/feature barcode data
+  multi-template  Output a multi config CSV template
+  vdj             Assembles single-cell VDJ receptor sequences from 10x
+                      Immune Profiling libraries
+  aggr            Aggregate data from multiple Cell Ranger runs
+  reanalyze       Re-run secondary analysis (dimensionality reduction,
+                      clustering, etc)
+  mkvdjref        Prepare a reference for use with CellRanger VDJ
+  mkfastq         Run Illumina demultiplexer on sample sheets that contain
+                      10x-specific sample index sets
+  testrun         Execute the 'count' pipeline on a small test dataset
+  mat2csv         Convert a feature-barcode matrix to CSV format
+  mkref           Prepare a reference for use with 10x analysis software.
+                      Requires a GTF and FASTA
+  mkgtf           Filter a GTF file by attribute prior to creating a 10x
+                      reference
+  upload          Upload analysis logs to 10x Genomics support
+  sitecheck       Collect Linux system configuration information
+  help            Print this message or the help of the given subcommand(s)
+
+Options:
+  -h, --help     Print help
+  -V, --version  Print version
+
+
+
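
A Cell Ranger run is computationally heavy, so you would typically run it as a batch job. Below is a minimal sketch; the project, wall time, sample name, FASTQ path and reference subdirectory are illustrative assumptions (use ls -l $CELLRANGER_DATA to find the actual reference folders):

+
#!/bin/bash
#SBATCH -A uppmax2023-2-25   # replace with your UPPMAX project
#SBATCH -p node
#SBATCH -t 24:00:00          # wall time, adjust to your data
module load bioinfo-tools cellranger/8.0.1
# count: quantify gene expression for one sample;
# --create-bam is required as of Cell Ranger 8
cellranger count \
  --id=my_sample \
  --transcriptome=$CELLRANGER_DATA/refdata-gex-GRCh38-2024-A \
  --fastqs=/path/to/fastqs \
  --sample=my_sample \
  --create-bam=true
+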

Using Cell Ranger from Python

+
+For staff +

Related to ticket 297240

+
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/chmod/index.html b/software/chmod/index.html new file mode 100644 index 000000000..80fe66522 --- /dev/null +++ b/software/chmod/index.html @@ -0,0 +1,3161 @@ + + + + + + + + + + + + + + + + + + + chmod - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

chmod

+

chmod is a Linux command to change the permissions of a file or folder

+

How to make a script executable?

+

Use (when the script is called my_script.sh):

+
chmod +x my_script.sh
+
+

You can now run the script using:

+
./my_script.sh
+
+

How to create a folder in the shared project folder that only I can access?

+

Your project folders at /proj/[naiss_project] are shared by members of that +NAISS project.

+

If you need a folder that only you can access, assuming +that folder is called my_private_folder, do the following:

+
chmod 700 my_private_folder
+
+
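
The digits are octal permission bits for user, group and others: here 7 gives you read, write and execute, and the two zeros remove all access for the group and others. The same can be written in symbolic mode:

+
chmod u=rwx,go= my_private_folder   # equivalent to chmod 700
+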
+How can I confirm it worked? +

Use ll:

+
$ ll
+drwxrwsr-x 2 sven my_group 4096 Aug 14 09:07 a_shared_folder/
+drwx--S--- 2 sven my_group 4096 Aug 14 09:06 my_private_folder/
+
+

The first characters show the type and permissions:

+
    +
  • drwxrwsr-x: accessible by the group
  • +
  • drwx--S---: only accessible by you
  • +
+
+

Now, you can enter that folder:

+
cd my_private_folder
+
+

However, others cannot and get this error message:

+
bash: cd: my_private_folder/: Permission denied
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/compilers/index.html b/software/compilers/index.html new file mode 100644 index 000000000..02688d2fc --- /dev/null +++ b/software/compilers/index.html @@ -0,0 +1,3195 @@ + + + + + + + + + + + + + + + + + + + + + + + Compilers - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Compilers

+

UPPMAX supports multiple compilers:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CompilerLanguage(s)Description
GCCC, C++, FortranThe GNU compiler collection
iccCOlder Intel C compiler
icpcC++Intel C++ compiler
icxCNewer Intel C compiler
ifortFortranOlder Intel Fortran compiler
ifxFortranNewer Intel Fortran compiler
javacJavaJava compiler
+

Different compilers are associated with different debuggers +and different profiling tools.

+
+How to make sure you have only the right compiler loaded? +

Use

+
module list
+
+

to get a list of modules.

+

This may look like this:

+
Currently Loaded Modules:
+  1)  uppmax    2) intel/19.5
+
+

If there are modules connected to the incorrect compiler, +unload the module, for example:

+
module unload intel
+
+

This scenario is valid if you want to use tools that use the GCC compiler.

+
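
After unloading, load the compiler you want instead, for example (gcc/12.1.0 is one of the versions available; use module avail gcc to see the current list):

+
module load gcc/12.1.0
+
+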
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/compiling_parallel/index.html b/software/compiling_parallel/index.html new file mode 100644 index 000000000..26d229fc7 --- /dev/null +++ b/software/compiling_parallel/index.html @@ -0,0 +1,3664 @@ + + + + + + + + + + + + + + + + + + + + + + + Compiling parallel code - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

MPI and OpenMP user guide

+

Table of contents:

+
    +
  • Compiling and running parallel programs on UPPMAX clusters.
      +
    • Introduction
    • +
    +
  • +
  • Overview of available compilers from GCC and Intel and compatible MPI libraries
  • +
  • Running serial programs on execution nodes
  • +
  • MPI using the OpenMPI library
      +
    • C programs
    • +
    • Fortran programs
    • +
    +
  • +
  • OpenMP
      +
    • C programs
    • +
    • Fortran programs
    • +
    +
  • +
  • Pthreads
  • +
+

This is a short tutorial about how to use the queuing system, and how to compile and run MPI and OpenMP jobs.

+

For serial programs, see a short version of this page at Compiling source code.

+

Compiling and running parallel programs on UPPMAX clusters

+

Introduction

+

These notes show by brief examples how to compile and run serial and parallel programs on the clusters at UPPMAX.

+

All programs are of the trivial "hello, world" type. The point is to demonstrate how to compile and execute the programs, not how to write parallel programs!

+

Running serial programs on execution nodes

+

Standard compatibility

+
    +
  • C11: gcc/4.8 and later, intel/16 and later
  • +
  • C17 (bug fix of C11): gcc/8 and later, intel/17 and later (full support in intel/19)
  • +
  • Fortran 2008: gcc/9 and later, intel/15 and later (full support in intel/18)
  • +
  • Fortran 2018: gcc/9 and later, intel/19 and later
  • +
+
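
To request a specific standard explicitly, pass the corresponding flag to the compiler. Two illustrative examples using standard GCC flags:

+
gcc -std=c17 -o prog prog.c
+gfortran -std=f2008 -o prog prog.f90
+
+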

Examples

+

Jobs are submitted to execution nodes through the resource manager. +We use Slurm on our clusters.

+

We will use the hello program we wrote in the section Compiling source code. The program language should not matter here when we deal with serial programs.

+

To run the serial program hello as a batch job using Slurm, enter the following shell script in the file hello.sh:

+
#!/bin/bash -l
+# hello.sh :  execute hello serially in Slurm
+# command: $ sbatch hello.sh
+# sbatch options use the sentinel #SBATCH
+# You must specify a project
+#SBATCH -A your_project_name
+#SBATCH -J serialtest
+# Put all output in the file hello.out
+#SBATCH -o hello.out
+# request 5 seconds of run time
+#SBATCH -t 0:0:5
+# request one core
+#SBATCH -p core -n 1
+./hello
+
+

The last line in the script is the command used to start the program.

+

Submit the job to the batch queue:

+
sbatch hello.sh
+
+
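
You can check the status of your job in the queue with squeue (replace your_username with your user name):

+
squeue -u your_username
+
+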

The program's output to stdout is saved in the file named by the -o flag.

+
$ cat hello.out
+hello, world
+
+

MPI using the OpenMPI library

+

Before compiling a program for MPI we must choose, in addition to the compiler, which version of MPI we want to use. At UPPMAX there are two, openmpi and intelmpi. These, with their versions, are compatible only to a subset of the gcc and intel compiler versions.

+
+

Tip

+

Check this compatibility page for a more complete picture of compatible versions.

+
+

C programs using OpenMPI

+

Enter the following MPI program in C and save it in the file hello-mpi.c (the name used when compiling it below)

+
/* hello-mpi.c :  mpi program in c printing a message from each process */
+#include <stdio.h>
+#include <mpi.h>
+int main(int argc, char *argv[])
+{
+    int npes, myrank;
+    MPI_Init(&argc, &argv);
+    MPI_Comm_size(MPI_COMM_WORLD, &npes);
+    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
+    printf("From process %d out of %d, Hello World!\n", myrank, npes);
+    MPI_Finalize();
+    return 0;
+}
+
+

Before compiling a program for MPI we must choose which version of MPI. At UPPMAX there are two, openmpi and intelmpi. For this example we will use openmpi. +To load the openmpi module, enter the command below or choose other versions according to the lists above.

+
module load gcc/10.3.0 openmpi/3.1.6
+
+

To check that the openmpi modules is loaded, use the command:

+
module list
+
+

The command to compile a C program for MPI is mpicc. Which compiler is used when this command is issued depends on which compiler module was loaded before openmpi.

+
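
To see which underlying compiler and flags the mpicc wrapper will invoke, OpenMPI's wrapper compilers accept a --showme flag:

+
mpicc --showme
+
+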

To compile, enter the command:

+
mpicc -o hello-mpi hello-mpi.c
+
+

You should add optimization and other flags to the mpicc command, just as you would when invoking the underlying compiler directly. So if the gcc compiler is used and you wish to compile an MPI program written in C with good, fast optimization, you could use a command similar to the following:

+
mpicc -Ofast -o hello-mpi hello-mpi.c
+
+

To run the mpi program hello using the batch system, we make a batch script with name hello-mpi.sh

+
#!/bin/bash -l
+# hello-mpi.sh :  execute parallel mpi program hello-mpi on Slurm
+# use openmpi
+# command: $ sbatch hello-mpi.sh
+# Slurm options use the sentinel #SBATCH
+#SBATCH -A your_project_name
+#SBATCH -J mpitest
+#SBATCH -o hello-mpi.out
+#
+# request 5 seconds of run time
+#SBATCH -t 00:00:05
+#SBATCH -p node -n 8
+module load gcc/10.3.0 openmpi/3.1.6
+mpirun ./hello-mpi
+
+

The last line in the script is the command used to start the program. +The last word on the last line is the program name hello-mpi.

+

Submit the job to the batch queue:

+
sbatch hello-mpi.sh
+
+

The program's output to stdout is saved in the file named by the -o flag. +A test run of the above program yields the following output file:

+
$ cat hello-mpi.out
+From process 4 out of 8, Hello World!
+From process 5 out of 8, Hello World!
+From process 2 out of 8, Hello World!
+From process 7 out of 8, Hello World!
+From process 6 out of 8, Hello World!
+From process 3 out of 8, Hello World!
+From process 1 out of 8, Hello World!
+From process 0 out of 8, Hello World!
+
+

Fortran programs using OpenMPI

+

The following example program does numerical integration to find Pi (inefficiently, but it is just an example):

+
program testampi
+    implicit none
+    include 'mpif.h'
+    double precision :: h,x0,x1,v0,v1
+    double precision :: a,amaster
+    integer :: i,intlen,rank,size,ierr,istart,iend
+    call MPI_Init(ierr)
+    call MPI_Comm_size(MPI_COMM_WORLD,size,ierr)
+    call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
+    intlen=100000000
+    write (*,*) 'I am node ',rank+1,' out of ',size,' nodes.'
+
+    h=1.d0/intlen
+    istart=(intlen-1)*rank/size
+    iend=(intlen-1)*(rank+1)/size
+    write (*,*) 'start is ', istart
+    write (*,*) 'end is ', iend
+    a=0.d0
+    do i=istart,iend
+           x0=i*h
+           x1=(i+1)*h
+           v0=sqrt(1.d0-x0*x0)
+           v1=sqrt(1.d0-x1*x1)
+           a=a+0.5*(v0+v1)*h
+    enddo
+    write (*,*) 'Result from node ',rank+1,' is ',a
+    call MPI_Reduce(a,amaster,1, &
+             MPI_DOUBLE_PRECISION,MPI_SUM,0,MPI_COMM_WORLD,ierr)
+    if (rank.eq.0) then
+           write (*,*) 'Result of integration is ',amaster
+           write (*,*) 'Estimate of Pi is ',amaster*4.d0
+    endif
+    call MPI_Finalize(ierr)
+    stop
+end program testampi
+
+

The program can be compiled by this procedure, using mpif90:

+
module load intel/20.4 openmpi/3.1.6
+mpif90 -Ofast -o testampi testampi.f90
+
+

The program can be run by creating a submit script sub.sh:

+
#!/bin/bash -l
+# execute parallel mpi program in Slurm
+# command: $ sbatch sub.sh
+# Slurm options use the sentinel #SBATCH
+#SBATCH -J mpitest
+#SBATCH -A your_project_name
+#SBATCH -o pi
+#
+# request 5 seconds of run time
+#SBATCH -t 00:00:05
+#
+#SBATCH -p node -n 8
+module load intel/20.4 openmpi/3.1.6
+
+mpirun ./testampi
+
+

Submit it:

+
sbatch sub.sh
+
+

Output from the program on Rackham:

+
I am node             8  out of             8  nodes.
+start is      87499999
+end is      99999999
+I am node             3  out of             8  nodes.
+start is      24999999
+end is      37499999
+I am node             5  out of             8  nodes.
+start is      49999999
+end is      62499999
+I am node             2  out of             8  nodes.
+start is      12499999
+end is      24999999
+I am node             7  out of             8  nodes.
+start is      74999999
+end is      87499999
+I am node             6  out of             8  nodes.
+start is      62499999
+end is      74999999
+I am node             1  out of             8  nodes.
+start is             0
+end is      12499999
+I am node             4  out of             8  nodes.
+start is      37499999
+end is      49999999
+Result from node             8  is    4.0876483237300587E-002
+Result from node             5  is    0.1032052706959522
+Result from node             2  is    0.1226971551244773
+Result from node             3  is    0.1186446918315650
+Result from node             7  is    7.2451466712425514E-002
+Result from node             6  is    9.0559231928350928E-002
+Result from node             1  is    0.1246737119371059
+Result from node             4  is    0.1122902087263801
+Result of integration is    0.7853982201935574
+Estimate of Pi is     3.141592880774230
+
+

OpenMP

+

OpenMP uses threads that share memory. OpenMP is supported by both the gcc and intel compilers, in the C/C++ and Fortran languages. Do not confuse it with OpenMPI, which is an open-source library for MPI. OpenMP support is built into all modern compilers.

+

Depending on your preferences load the chosen compiler:

+
module load gcc/12.1.0
+
+

or

+
module load intel/20.4
+
+

C programs using OpenMP

+

Enter the following OpenMP program in C and save it in the file hello_omp.c

+
/* hello_omp.c :  openmp program in c printing a message from each thread */
+#include <stdio.h>
+#include <omp.h>
+int main()
+{
+    int nthreads, tid;
+    #pragma omp parallel private(nthreads, tid)
+    {
+        nthreads = omp_get_num_threads();
+        tid = omp_get_thread_num();
+        printf("From thread %d out of %d, hello, world\n", tid, nthreads);
+    }
+    return 0;
+}
+
+

To compile, enter the command (note the -fopenmp or -qopenmp flag depending on compiler):

+
gcc -fopenmp -o hello_omp hello_omp.c
+
+

or

+
icc -qopenmp -o hello_omp hello_omp.c
+
+

Also here you should add optimization flags as appropriate, such as -Ofast for gcc or -fast for icc.

+
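
For a quick test outside the batch system, you can set the number of threads and run the program directly:

+
export OMP_NUM_THREADS=4
+./hello_omp
+
+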

To run the OpenMP program hello using the batch system, enter the following shell script in the file hello.sh:

+
#!/bin/bash -l
+# hello.sh :  execute parallel openmp program hello on Slurm
+# use openmp
+# command: $ sbatch hello.sh
+# Slurm options use the sentinel #SBATCH
+#SBATCH -J omptest
+#SBATCH -A your_project_name
+#SBATCH -o hello.out
+#
+# request 5 seconds of run time
+#SBATCH -t 00:00:05
+#SBATCH -p node -n 8
+uname -n
+#Tell the openmp program to use 8 threads
+export OMP_NUM_THREADS=8
+module load intel/20.4
+# or gcc...
+ulimit -s  $STACKLIMIT
+./hello_omp
+
+

The last line in the script is the command used to start the program.

+

Submit the job to the batch queue:

+
sbatch hello.sh
+
+

The program's output to stdout is saved in the file named by the -o flag. +A test run of the above program yields the following output file:

+
$ cat hello.out
+r483.uppmax.uu.se
+unlimited
+From thread 0 out of 8, hello, world
+From thread 1 out of 8, hello, world
+From thread 2 out of 8, hello, world
+From thread 3 out of 8, hello, world
+From thread 4 out of 8, hello, world
+From thread 6 out of 8, hello, world
+From thread 7 out of 8, hello, world
+From thread 5 out of 8, hello, world
+
+

Fortran programs using OpenMP

+

Enter the following openmp program in Fortran and save in the file hello_omp.f90

+
PROGRAM HELLO
+  INTEGER NTHREADS, TID, OMP_GET_NUM_THREADS, OMP_GET_THREAD_NUM
+! Fork a team of threads giving them their own copies of variables
+!$OMP PARALLEL PRIVATE(NTHREADS, TID)
+  ! Obtain thread number
+  TID = OMP_GET_THREAD_NUM()
+  PRINT *, 'Hello World from thread = ', TID
+  ! Only master thread does this
+  IF (TID .EQ. 0) THEN
+    NTHREADS = OMP_GET_NUM_THREADS()
+    PRINT *, 'Number of threads = ', NTHREADS
+  END IF
+! All threads join master thread and disband
+!$OMP END PARALLEL
+END
+
+

With gcc compiler:

+
gfortran hello_omp.f90 -o hello_omp -fopenmp
+
+

and with Intel compiler:

+
ifort hello_omp.f90 -o hello_omp -qopenmp
+
+

Run with:

+
$ ./hello_omp
+
+ Hello World from thread =            1
+ Hello World from thread =            2
+ Hello World from thread =            0
+ Hello World from thread =            3
+ Number of threads =            4
+
+

A batch file would look similar to the C version, above.

+

Pthreads

+

Pthreads (POSIX threads) are more low-level than OpenMP. That means that for a beginner it is easier to get the expected speed-up with just a few lines of OpenMP. On the other hand, it may be possible to gain more efficiency from your code with pthreads, though with quite some effort. Pthreads are native to C/C++; with the additional installation of a POSIX threads library for Fortran it is possible to use them there as well.

+

Enter the following program in C and save it in the file hello_pthread.c (the name used when compiling it below)

+
/* hello_pthread.c :  create system pthreads and print a message from each thread */
+#include <stdio.h>
+#include <pthread.h>
+// does not work for setting array length of "tid": const int NTHR = 8;
+// Instead use "#define"
+#define NTHR 8
+int nt = NTHR, tid[NTHR];
+pthread_attr_t attr;
+void *hello(void *id)
+{
+    printf("From thread %d out of %d: hello, world\n", *((int *) id), nt);
+    pthread_exit(0);
+}
+int main()
+{
+    int i;
+    pthread_t thread[NTHR];
+    /* system threads */
+    pthread_attr_init(&attr);
+    pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
+    /* create threads */
+    for (i = 0; i < nt; i++) {
+        tid[i] = i;
+        pthread_create(&thread[i], &attr, hello, (void *) &tid[i]);
+    }
+    /* wait for threads to complete */
+    for (i = 0; i < nt; i++)
+        pthread_join(thread[i], NULL);
+    return 0;
+}
+
+

To compile, enter the commands

+
module load gcc/10.2.0
+gcc -pthread -o hello_pthread hello_pthread.c
+
+

To run the pthread program hello using the batch system, enter the following shell script in the file hello.sh:

+
#!/bin/bash -l
+# hello.sh :  execute parallel pthreaded program hello on Slurm
+# command: $ sbatch hello.sh
+# Slurm options use the sentinel #SBATCH
+#SBATCH -J pthread
+#SBATCH -A your_project_name
+#SBATCH -o hello.out
+#
+# request 5 seconds of run time
+#SBATCH -t 00:00:05
+# use openmp programming environment
+# to ensure all processors on the same node
+#SBATCH -p node -n 8
+uname -n
+./hello_pthread
+
+

The last line in the script is the command used to start the program. +Submit the job to the batch queue:

+
sbatch hello.sh
+
+

The program's output to stdout is saved in the file named by the -o flag. +A test run of the above program yields the following output file:

+
$ cat hello.out
+r483.uppmax.uu.se
+From thread 0 out of 8: hello, world
+From thread 4 out of 8: hello, world
+From thread 5 out of 8: hello, world
+From thread 6 out of 8: hello, world
+From thread 7 out of 8: hello, world
+From thread 1 out of 8: hello, world
+From thread 2 out of 8: hello, world
+From thread 3 out of 8: hello, world
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/compiling_serial/index.html b/software/compiling_serial/index.html new file mode 100644 index 000000000..59bf4c616 --- /dev/null +++ b/software/compiling_serial/index.html @@ -0,0 +1,3244 @@ + + + + + + + + + + + + + + + + + + + + + + + Compiling serial code - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Compiling serial source code

+

For parallel programs, see MPI and OpenMP user guide.

+

Overview

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LanguageCompilerFind guide at ...
CGCCCompile C using GCC
CIntel, iccCompile C using icc
CIntel, icxCompile C using icx
C++GCCCompile C++ using GCC
C++Intel, icpcCompile C++ using icpc
FortranGCCCompile Fortran using GCC
FortranIntel, ifortCompile Fortran using ifort
FortranIntel, ifxCompile Fortran using ifx
JavajavacCompile Java using javac
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/conda/index.html b/software/conda/index.html new file mode 100644 index 000000000..cf1b91d33 --- /dev/null +++ b/software/conda/index.html @@ -0,0 +1,3726 @@ + + + + + + + + + + + + + + + + + + + + + + + Installing with conda - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Conda

+
+Want to see the video 'How to use Conda on Rackham'? +

If you want to see a video on how to use Conda on Rackham, +go here

+
+

Install packages or not? Check it

+

Python

+
    +
  • Check python versions: module spider python
  • +
+
+What does that look like? +

It will look similar to this:

+
[sven@rackham1 ~]$ module spider python
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  python:
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+     Versions:
+        python/2.7.6
+        python/2.7.9
+        python/2.7.11
+        python/2.7.15
+        python/3.3
+        python/3.3.1
+        python/3.4.3
+        python/3.5.0
+        python/3.6.0
+        python/3.6.8
+        python/3.7.2
+        python/3.8.7
+        python/3.9.5
+        python/3.10.8
+        python/3.11.4
+        python/3.11.8
+        python/3.12.1
+     Other possible modules matches:
+        Biopython  Boost.Python  GitPython  IPython  Python  biopython  flatbuffers-python  netcdf4-python  protobuf-python  python-parasail  python3  python_GIS_packages  ...
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  To find other possible module matches execute:
+
+      $ module -r spider '.*python.*'
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  For detailed information about a specific "python" package (including how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modules.
+  For example:
+
+     $ module spider python/3.12.1
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+
+
    +
  • load a python version like: module load python/3.10.8
  • +
  • from the Python shell with the import command
  • +
  • from BASH shell with the
      +
    • pip list command
    • +
    • module help python/3.9.5 (or other version) at UPPMAX
    • +
    +
  • +
+

Is it not there, or is it a stand-alone tool? Then proceed!

+
+

Tip Python packages

+
    +
  • Try Conda first directly on Bianca.
  • +
  • Otherwise, on Rackham, first try pip.
  • +
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.
  • +
  • If you want to keep the number of files down, use PyPI (pip).
  • +
+
+

Python packages with pip

+
+Want to see the video 'Load and use Python packages on UPPMAX'? +

If you want to see a video on how to load and use Python packages +on the UPPMAX (and HPC2N) HPC clusters, +go here

+
+

See the Python user guide

+

Conda repositories

+

We have mirrored all major non-proprietary Conda repositories (not main, anaconda and r) directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.

+
+

Available Conda channels

+
    +
  • bioconda
  • +
  • biocore
  • +
  • conda-forge
  • +
  • dranew
  • +
  • free
  • +
  • main
  • +
  • pro
  • +
  • qiime2
  • +
  • r
  • +
  • r2018.11
  • +
  • scilifelab-lts
  • +
  • nvidia
  • +
  • pytorch
  • +
+
+ +

Using Conda

+
+

Conda cheat sheet

+
    +
  • +

    List all environments: conda info -e or conda env list

    +
  • +
  • +

    Create a conda environment (it is good to define the included packages directly; channels do not need to be explicitly mentioned)

    +
    conda create --prefix /some/path/to/env <package1> [<package2> ... ]
    +
    +
      +
    • On our systems the above should replace conda create --name myenvironment ...
    • +
    +
  • +
  • +

    Create a new environment from requirements.txt:

    +
      +
    • conda create --prefix /some/path/to/env --file requirements.txt
    • +
    +
  • +
  • +

    Activate a specific environment: conda activate myenvironment

    +
  • +
  • +

    List packages in present environment: conda list

    +
      +
    • Also pip list will work
    • +
    +
  • +
  • +

    Install additional package from an active environment:

    +
      +
    • conda install somepackage
    • +
    +
  • +
  • +

    Install from certain channel (conda-forge):

    +
      +
    • conda install -c conda-forge somepackage
    • +
    +
  • +
  • +

    Install a specific version: conda install somepackage=1.2.3

    +
      +
    +
  • +
  • +

    Deactivate current environment: conda deactivate

    +
  • +
  • +

    More

    +
  • +
+
+
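
A minimal end-to-end example tying these commands together (the environment name and package version are illustrative; it assumes CONDA_ENVS_PATH is set as described below and that conda activate has been enabled, otherwise use source activate):

+
module load conda
+conda create --prefix $CONDA_ENVS_PATH/myenv python=3.10
+conda activate myenv
+conda list
+conda deactivate
+
+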

Your conda settings on Rackham and Bianca

+
    +
  • export CONDA_ENVS_PATH=/a/path/to/a/place/in/your/project/
  • +
+
+

Tip

+
    +
  • You may want to have the same path for all conda environments in the present project
  • +
  • echo "export CONDA_ENVS_PATH=/a/path/to/a/place/in/your/project/" >> ~/.bashrc
      +
    • Example: echo "export CONDA_ENVS_PATH=/proj/<project>/conda" >> ~/.bashrc
    • +
    +
  • +
+
+
+

Warning

+
    +
  • It seems you are required to use this path, ending with the name of your environment, together with --prefix when you install new environments AND packages, also after activating the conda environment! + Like: conda install --prefix $CONDA_ENVS_PATH/<your-environment> ...
  • +
+
+
+

Tip

+
    +
  • REMEMBER TO conda clean -a once in a while to remove unused and unnecessary files
  • +
+
+
+By choice +
    +
  • +

    Run source conda_init.sh to initialise your shell (bash) to be able to run conda activate and conda deactivate etcetera instead of source activate. It will modify (append) your .bashrc file.

    +
  • +
  • +

    When conda is loaded you will by default be in the base environment, which works in the same way as other Conda environments. It is a “best practice” to avoid installing additional packages into your base software environment unless they are very general packages.

    +
  • +
+
+

Installing using Conda

+

We have mirrored all major Conda repositories directly on UPPMAX, on +both Rackham and Bianca. These are updated every third day. See above for these conda channels.

+
    +
  • You reach them all by loading the conda module.
  • +
  • You don't have to state the specific channel when using UPPMAX.
  • +
  • Also, you are offline on Bianca which means that the default is --offline, which you can specify if you want to simulate the experience on Rackham.
  • +
+
+

Tip

+

If you need a channel that isn't in our repository, we can easily add it. Just send us a message and we will do it.

+
+

Make a new conda environment

+
+

Tip

+
    +
  • Since Python and other packages depend on each other, expect that solving the versions takes some time.
  • +
  • use an interactive session!
  • +
+
+
    +
  1. +

    Do module load conda

    +
      +
    • This grants you access to the latest version of Conda and all major repositories on all UPPMAX systems.
    • +
    • Check the text output as conda is loaded, especially the first time, see below
    • +
    +
  2. +
  3. +

    Create the Conda environment

    +
      +
    • +

      Example:

      +
      conda create --prefix  $CONDA_ENVS_PATH/python36-env python=3.6 numpy=1.13.1 matplotlib=2.2.2
      +
      +
    • +
    +

    Info: the mamba alternative is not needed in newer versions of Conda!

    +
      +
    • +

      It all worked if you get something like this:

      +
      # To activate this environment, use
      +#
      +#     $ conda activate python36-env
      +#
      +# To deactivate an active environment, use
      +#
      +#     $ conda deactivate
      +
      +
    • +
    +
  4. +
  5. +

    Activate the conda environment by source activate if you have not enabled conda activate, see above:

    +
    source activate python36-env
    +
    +
      +
    • +

      You will see that your prompt is changing to start with (python-36-env) to show that you are within an environment.

      +
    • +
    • +

      You can also see the installed packages by:

      +
    • +
    +
    conda list
    +pip list
    +
    +
      +
    • you can also add more packages within the environment, either pinned to an exact version (use =) or as the latest compatible version:
    • +
    +
    conda install --prefix   $CONDA_ENVS_PATH/python36-env pandas
    +
    +
      +
    • that may have given you pandas=1.1.5 which would be the newest version compatible with python3.6 and numpy=1.13.1
    • +
    +
  6. +
  7. +

    Now do your work!

    +
  8. +
  9. +

    Deactivate with conda deactivate (this will work in any case!)

    +
    (python-36-env) $ conda deactivate
    +
    +
  10. +
+
+

Warning

+
    +
  • Conda is known to create many small files. + Your diskspace is not only limited in gigabytes, + but also in number of files (typically 300000 in $HOME).
  • +
  • Check your disk usage and quota limit with uquota
  • +
  • Do a conda clean -a once in a while to remove unused and unnecessary files
  • +
+
+

Working with Conda environments defined by files

+
    +
  • +

    Create an environment based on dependencies given in an environment + file:

    +
    conda env create --file environment.yml
    +
    +
  • +
  • +

    Create file from present conda environment:

    +
    conda env export > environment.yml
    +
    +
  • +
+

environment.yml (for conda) is a YAML file which looks like this:

+
name: my-environment
+channels:        # not needed on bianca
+- defaults
+dependencies:
+- numpy
+- matplotlib
+- pandas
+- scipy
+
+

environment.yml with versions:

+
name: my-environment
+channels:            #not needed on bianca
+- defaults
+dependencies:
+- python=3.7
+- numpy=1.18.1
+- matplotlib=3.1.3
+- pandas=1.1.2
+- scipy=1.6.2
+
+
+

More on dependencies

+ +
+
+

keypoints

+
    +
  • +

    Conda is an installer of packages but also bigger toolkits

    +
  • +
  • +

    Conda on Bianca is easy since the repos in the most used channels are local.

    +
  • +
  • +

    Conda creates isolated environments not clashing with other installations of python and other versions of packages

    +
  • +
  • +

    Conda environment requires that you install all packages needed by yourself, although automatically.

    +
  • +
  • +

    That is, you cannot load the python module and use the packages therein inside your Conda environment.

    +
  • +
+
+

Conda in batch scripts

+

If you have already set up the CONDA_ENVS_PATH path and run 'conda init bash', a batch script using a conda environment should include

+
module load conda
+conda activate <name of environment>
+
+
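
A minimal sketch of a complete batch script using this (the project name, resources, environment name and script name are placeholders to adapt):

+
#!/bin/bash -l
+#SBATCH -A your_project_name
+#SBATCH -p core -n 1
+#SBATCH -t 00:10:00
+module load conda
+conda activate <name of environment>
+python my_script.py
+
+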

Packages on Bianca

+

Since we have mirrored conda repositories locally conda will work also on Bianca!

+

First try Conda! There is a mirrored repository with many available packages.

+

If your desired package is not there but is available via pip, follow the guide below, perhaps while looking at the Bianca user guide and Transit user guide.

+

Make an installation on Rackham and then use the wharf to copy it over to your directory on Bianca.

+

Path on Rackham and Bianca could be ~/.local/lib/python<version>/site-packages/.

+

You may have to:

+

in source directory:

+
cp -a <package_dir> <wharf_mnt_path>
+
+

you may want to tar before copying to include all possible symbolic links:

+
$ tar cfz <tarfile.tar.gz> <package>
+and in target directory (wharf_mnt) on Bianca:
+$ tar xfz <tarfile.tar.gz> #if there is a tar file!
+$ mv <file(s)> ~/.local/lib/python<version>/site-packages/
+
+

If problems arise, send an email to support@uppmax.uu.se and we'll help you.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/containers/index.html b/software/containers/index.html new file mode 100644 index 000000000..11bce89ac --- /dev/null +++ b/software/containers/index.html @@ -0,0 +1,3107 @@ + + + + + + + + + + + + + + + + + + + Containers - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Containers

+

Containers allow one to bundle installed software into a file, +with the goal to run software on any platform.

+ + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/cowsay/index.html b/software/cowsay/index.html new file mode 100644 index 000000000..a3bffd641 --- /dev/null +++ b/software/cowsay/index.html @@ -0,0 +1,3142 @@ + + + + + + + + + + + + + + + + + + + cowsay - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

cowsay

+

cowsay is a tool that is commonly used as a toy example.

+

Because cowsay is not part of the Linux kernel, +users commonly need to install it. +Or in our case: load a module to use it.

+

cowsay (the tool) is part of the identically-named cowsay +module.

+

Finding the module that +has cowsay installed:

+
module spider cowsay
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ module spider cowsay
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  cowsay: cowsay/3.03
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+    This module can be loaded directly: module load cowsay/3.03
+
+    Help:
+       cowsay - use cowsay
+
+
+

Loading the latest version of the cowsay module:

+
module load cowsay/3.03
+
+

Now you can run cowsay:

+
cowsay hello
+
+

results in:

+
 _______
+< hello >
+ -------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\
+                ||----w |
+                ||     ||
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/cram/index.html b/software/cram/index.html new file mode 100644 index 000000000..da61b4f11 --- /dev/null +++ b/software/cram/index.html @@ -0,0 +1,3355 @@ + + + + + + + + + + + + + + + + + + + + + + + Cram - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using CRAM to compress BAM files

+

Introduction

+

Biological data is being produced at a higher rate each day, and it is a challenge to store it all somewhere.

+

The bioinformatics community is trying to keep up with the growing data amounts, and new file formats are part of this evolution. The BAM format was a huge success due to its ability to compress aligned reads by ~50-80% of their original size, but even that is not sustainable in the long run.

+

CRAM is a new program that can compress SAM/BAM files even more, which makes it suitable for long-term storage. We think this format will become more common, and that it will be supported by most tools, like the BAM format is today.

+

There are a couple of options you can give to CRAM that will make it behave differently. Read more about the different options on the developers' homepage.

+

Lossless compression: When converting BAM -> CRAM -> BAM, the final BAM file will look identical to the initial BAM file.

+

Lossy compression: You can specify how to deal with the quality scores in a multitude of different way. To cite the creators of CRAM:

+

"Bam2Cram allows to specify lossy model via a string which can be composed of one or more words separated by '-'. +Each word is read or base selector and quality score treatment, which can be binning (Illumina 8 bins) or full scale (40 values).

+

Here are some examples:

+
    +
  • N40-D8 - preserve quality scores for non-matching bases with full precision, and bin quality scores for positions flanking deletions.
  • +
  • m5 - preserve quality scores for reads with mapping quality score lower than 5
  • +
  • R40X10-N40 - preserve non-matching quality scores and those matching with coverage lower than 10
  • +
  • *8 - bin all quality scores
  • +
+

Selectors:

+
    +
  • R - bases matching the reference sequence
  • +
  • N - aligned bases mismatching the reference; this only applies to 'M', '=' (EQ) or 'X' BAM cigar elements
  • +
  • U - unmapped read
  • +
  • Pn - pileup: capture all bases at a given position on the reference if there are at least n mismatches
  • +
  • D - read positions flanking a deletion
  • +
  • Mn - reads with mapping quality score higher than n
  • +
  • mn - reads with mapping quality score lower than n
  • +
  • I - insertions
  • +
  • * - all
  • +
+

By default no quality scores will be preserved.

+

Illumina 8-binning scheme:

+
0, 1, 6, 6, 6, 6, 6, 6, 6, 6, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+15, 22, 22, 22, 22, 22, 27, 27, 27, 27, 27, 33, 33, 33, 33, 33, 37,
+37, 37, 37, 37, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
+40, 40, 40, 40, 40, 40"
+
+

Illumina's white paper on the matter

+

Compression rate

+

So, how much compression are we talking about here? Here are the results of a test with a 1.9 GB BAM file (7.4 GB SAM format).

+

CRAM COMPRESSION RATE

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
File formatFile size (GB)
SAM7.4
BAM1.9
CRAM lossless1.4
CRAM 8 bins0.8
CRAM no quality scores0.26
+

Graph showing the content of the above table

+

Examples

+

Lossless compression of a BAM file

+

Lossless compression means that the BAM file will be identical before and after compression/decompression. The downside of this is that the produced CRAM file will be larger since it has to save each and every quality score. To make a lossless compression, use the following command (can also be written as a single line by removing the backslashes):

+
$ module load bioinfo-tools cramtools
+$ java -jar $CRAM_HOME/cram.jar cram \
+-I file.bam \
+-O file.cram \
+-R ref.fa \
+--capture-all-tags \
+--lossless-quality-score
+
+

The important parts here are:

+
    +
  • -I which means the input file (name of the BAM file to be compressed).
  • +
  • -O which means the output file (name of the new compressed CRAM file).
  • +
  • -R which means the reference file (the FASTA reference to be used. Must be the same when decompressing).
  • +
  • --capture-all-tags which means that all the tags in the BAM file will be saved.
  • +
  • --lossless-quality-score which means the quality scores will be preserved.
  • +
+

CRAM assumes you have indexed your reference genome using e.g. samtools faidx, i.e. that you have both ref.fa and ref.fa.fai (note the index name: ref.fa.fai, NOT ref.fai)

+

To decompress the CRAM file to a BAM file again, use this command (can also be written as a single line by removing the backslashes):

+
$ module load bioinfo-tools cramtools
+$ java -jar $CRAM_HOME/cram.jar bam \
+-I file.cram \
+-O file.bam \
+-R ref.fa
+
+

If you had NM or MD tags in your original BAM file, you have to specify that they should be added in the BAM file that is to be created by adding

+
--calculate-md-tag
+and/or
+--calculate-nm-tag
+
+

to the command.

+
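
To verify that the round trip really was lossless, you can compare checksums of the alignment records before and after (a minimal sketch; the file names are placeholders, and samtools is assumed to be available, e.g. via module load bioinfo-tools samtools):

+
$ samtools view file_original.bam | md5sum
+$ samtools view file_restored.bam | md5sum
+
+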

Lossy compression of a BAM file

+

The motivation to use a lossy compression is that the compression ratio will be much larger, i.e. the cram file will be much smaller. The best compression ratio is reached, naturally, when the quality scores are removed all together. This does have an impact on future analysis such as SNP calling, so the trick is, as usual, to find a good balance.

+

Illumina has started with a practice called binning. That means that instead of having 40 unique quality scores, you put similar values into bins. Illumina thought 8 bins would get the job done, and that is what CRAM recommends. See this page's introduction for more details about the bins.

+

To compress your BAM file and binning the quality scores in the same way as Illumina, use this command (can also be written as a single line by removing the backslashes):

+
$ module load bioinfo-tools cramtools
+$ java -jar $CRAM_HOME/cram.jar cram \
+-I file.bam \
+-O file.cram \
+-R ref.fa \
+--capture-all-tags \
+--lossy-quality-score-spec \*8
+
+

The important parts here are:

+
    +
  • -I which means the input file (name of the BAM file to be compressed).
  • +
  • -O which means the output file (name of the new compressed CRAM file).
  • +
  • -R which means the reference file (the FASTA reference to be used. Must be the same when decompressing.).
  • +
  • --capture-all-tags which means that all the tags in the BAM file will be saved.
  • +
  • --lossy-quality-score-spec *8 which means the quality scores will be binned into 8 bins the Illumina way. (Notice that we need to apply a "\" before the "8" as your shell Bash will otherwise expand this expression if you'd happen to have any filenames ending with eights in the current directory.)
  • +
+

To decompress the CRAM file to a BAM file again, use this command (can also be written as a single line by removing the backslashes):

+
$ module load bioinfo-tools cramtools
+$ java -jar $CRAM_HOME/cram.jar bam \
+-I file.cram \
+-O file.bam \
+-R ref.fa
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container/index.html b/software/create_singularity_container/index.html new file mode 100644 index 000000000..e6c244038 --- /dev/null +++ b/software/create_singularity_container/index.html @@ -0,0 +1,3159 @@ + + + + + + + + + + + + + + + + + + + Creating a Singularity container - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Creating a Singularity container

+

There are many ways to create a Singularity container.

+

How and where to build?

+

Here is a decision tree on how and where to build a Singularity container.

+
flowchart TD
+  where_to_build[Where to build my Singularity container?]
+  where_to_build --> have_linux
+  have_linux[Do you have Linux with sudo rights and Singularity installed?]
+  build_short[Is the build short?]
+  use_linux(Build on Linux computer with sudo rights)
+  use_remote_builder_website(Build using Sylabs remote builder website)
+  use_remote_builder_rackham(Build using Sylabs remote builder from Rackham)
+
+  have_linux --> |yes| use_linux
+  have_linux --> |no| build_short
+  build_short --> |yes| use_remote_builder_website
+  build_short --> |yes| use_remote_builder_rackham
+ + + + + + + + + + + + + + + + + + + + + +
How and whereFeatures
Local LinuxEasiest for Linux users, can do longer builds
Remote builder from websiteEasiest for non-Linux users, short builds only
Remote builder from RackhamCan do longer builds
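
As an illustration of the last option, building remotely from Rackham could look like this (a sketch; it assumes you have a Sylabs account and log in once with an access token):

+
singularity remote login
+singularity build --remote my_container.sif Singularity
+
+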
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container_for_r_package/index.html b/software/create_singularity_container_for_r_package/index.html new file mode 100644 index 000000000..cd46dc3ee --- /dev/null +++ b/software/create_singularity_container_for_r_package/index.html @@ -0,0 +1,3397 @@ + + + + + + + + + + + + + + + + + + + Create a Singularity container for an R package - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create a Singularity container for an R package

+

There are multiple ways to create a Singularity container.

+

This page shows how to create a Singularity container for an R package.

+

Although the R_Packages module +has thousands of packages, sometimes you need a package from GitHub.

+

Procedure

+
+Prefer a video? +

See the video 'Create a Singularity container for an R package on GitHub'

+
+

The hardest part of this procedure may be to have +Linux with Singularity installed on a computer where you have +super-user rights.

+

The most important thing when creating a Singularity container +is to start from a good base container.

+

1. Create a Singularity script

+

Create a file called Singularity (this is the recommended filename +for Singularity scripts) with the following content:

+
Bootstrap: docker
+From: rocker/tidyverse
+
+%post
+    # From https://github.com/brucemoran/Singularity/blob/8eb44591284ffb29056d234c47bf8b1473637805/shub/bases/recipe.CentOs7-R_3.5.2#L21
+    echo 'export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C' >> $SINGULARITY_ENVIRONMENT
+
+    Rscript -e 'install.packages(c("remotes", "devtools"))'
+    Rscript -e 'remotes::install_github("bmbolstad/preprocessCore")'
+
+%runscript
+Rscript "$@"
+
+

This example script installs the R package hosted on GitHub at +https://github.com/bmbolstad/preprocessCore. +Replace the R package to suit your needs.

+

2. Build the Singularity container

+

Here is how you create a Singularity container called my_container.sif from the Singularity script:

+
sudo singularity build my_container.sif Singularity
+
+

Which will build a Singularity container called my_container.sif.
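
Because the %runscript section calls Rscript, you can afterwards run an R script through the container directly (my_script.R is a placeholder for your own script):

+
./my_container.sif my_script.R
+
+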

+
+What does that look like? +

Your output will be similar to this:

+
sven@sven-N141CU:~/temp$ sudo singularity build my_container.sif Singularity 
+INFO:    Starting build...
+INFO:    Fetching OCI image...
+307.6MiB / 307.6MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+30.9MiB / 30.9MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+28.2MiB / 28.2MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+261.1MiB / 261.1MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+193.7MiB / 193.7MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+26.3MiB / 26.3MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+288.7KiB / 288.7KiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+INFO:    Extracting OCI image...
+INFO:    Inserting Singularity configuration...
+INFO:    Running post scriptlet
++ echo export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C
++ Rscript -e install.packages(c("remotes", "devtools"))
+Installing packages into ‘/usr/local/lib/R/site-library’
+(as ‘lib’ is unspecified)
+trying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/remotes_2.5.0.tar.gz'
+Content type 'binary/octet-stream' length 436043 bytes (425 KB)
+==================================================
+downloaded 425 KB
+
+trying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/devtools_2.4.5.tar.gz'
+Content type 'binary/octet-stream' length 435688 bytes (425 KB)
+==================================================
+downloaded 425 KB
+
+* installing *binary* package ‘remotes’ ...
+* DONE (remotes)
+* installing *binary* package ‘devtools’ ...
+* DONE (devtools)
+
+The downloaded source packages are in
+ ‘/tmp/Rtmpow1CFQ/downloaded_packages’
++ Rscript -e remotes::install_github("bmbolstad/preprocessCore")
+Downloading GitHub repo bmbolstad/preprocessCore@HEAD
+── R CMD build ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
+  checking for file ‘/tmp/Rtmpx5C1XE/remotes13a1238df5bce/bmbolstad-preprocessCore-33ccbd9/DESCRIPTION’ (337ms)
+  preparing ‘preprocessCore’:
+  checking DESCRIPTION meta-information ...
+  cleaning src
+  running ‘cleanup’
+  checking for LF line-endings in source and make files and shell scripts
+  checking for empty or unneeded directories
+  building ‘preprocessCore_1.61.0.tar.gz’
+
+Installing package into ‘/usr/local/lib/R/site-library’
+(as ‘lib’ is unspecified)
+* installing *source* package ‘preprocessCore’ ...
+** using staged installation
+'config' variable 'CPP' is defunct
+checking for gcc... gcc
+checking whether the C compiler works... yes
+checking for C compiler default output file name... a.out
+checking for suffix of executables... 
+checking whether we are cross compiling... no
+checking for suffix of object files... o
+checking whether we are using the GNU C compiler... yes
+checking whether gcc accepts -g... yes
+checking for gcc option to accept ISO C89... none needed
+checking how to run the C preprocessor... gcc -E
+checking for library containing pthread_create... none required
+checking for grep that handles long lines and -e... /usr/bin/grep
+checking for egrep... /usr/bin/grep -E
+checking for ANSI C header files... yes
+checking for sys/types.h... yes
+checking for sys/stat.h... yes
+checking for stdlib.h... yes
+checking for string.h... yes
+checking for memory.h... yes
+checking for strings.h... yes
+checking for inttypes.h... yes
+checking for stdint.h... yes
+checking for unistd.h... yes
+checking for stdlib.h... (cached) yes
+checking if PTHREAD_STACK_MIN is defined... yes
+checking if R is using flexiblas... flexiblas not found. preprocessCore threading will not be disabled
+configure: Enabling threading for preprocessCore
+configure: creating ./config.status
+config.status: creating src/Makevars
+** libs
+using C compiler: ‘gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0’
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_colSummarize.c -o R_colSummarize.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmd_interfaces.c -o R_plmd_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmr_interfaces.c -o R_plmr_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_rlm_interfaces.c -o R_rlm_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subColSummarize.c -o R_subColSummarize.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subrcModel_interfaces.c -o R_subrcModel_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg.c -o avg.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg_log.c -o avg_log.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c biweight.c -o biweight.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c init_package.c -o init_package.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c lm.c -o lm.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_avg.c -o log_avg.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_median.c -o log_median.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c matrix_functions.c -o matrix_functions.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median.c -o median.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median_log.c -o median_log.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c medianpolish.c -o medianpolish.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmd.c -o plmd.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmr.c -o plmr.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c psi_fns.c -o psi_fns.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c qnorm.c -o qnorm.o
+qnorm.c: In function ‘qnorm_c_l’:
+qnorm.c:595:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+  595 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+  596 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c:616:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+  616 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+  617 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c: In function ‘qnorm_c_determine_target_l’:
+qnorm.c:2004:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+ 2004 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+ 2005 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c: In function ‘qnorm_c_determine_target_via_subset_l’:
+qnorm.c:2604:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+ 2604 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+ 2605 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm.c -o rlm.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_anova.c -o rlm_anova.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_se.c -o rlm_se.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_background4.c -o rma_background4.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_common.c -o rma_common.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c weightedkerneldensity.c -o weightedkerneldensity.o
+gcc -shared -L/usr/local/lib/R/lib -L/usr/local/lib -o preprocessCore.so R_colSummarize.o R_plmd_interfaces.o R_plmr_interfaces.o R_rlm_interfaces.o R_subColSummarize.o R_subrcModel_interfaces.o avg.o avg_log.o biweight.o init_package.o lm.o log_avg.o log_median.o matrix_functions.o median.o median_log.o medianpolish.o plmd.o plmr.o psi_fns.o qnorm.o rlm.o rlm_anova.o rlm_se.o rma_background4.o rma_common.o weightedkerneldensity.o -llapack -lblas -lgfortran -lm -lquadmath -L/usr/local/lib/R/lib -lR
+installing to /usr/local/lib/R/site-library/00LOCK-preprocessCore/00new/preprocessCore/libs
+** R
+** inst
+** byte-compile and prepare package for lazy loading
+** help
+*** installing help indices
+** building package indices
+** testing if installed package can be loaded from temporary location
+** checking absolute paths in shared objects and dynamic libraries
+** testing if installed package can be loaded from final location
+** testing if installed package keeps a record of temporary installation path
+* DONE (preprocessCore)
+INFO:    Adding runscript
+INFO:    Creating SIF file...
+INFO:    Build complete: my_container.sif
+
+
+

3. Create an R script for the container to use

+

Here we create an R script for the container to use.

+

Here is an example of such an R script, which prints the contents of the +preprocessCore::colSummarizeAvgLog function:

+
preprocessCore::colSummarizeAvgLog
+
+

Save this R script, for example, as my_r_script.R.

+
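If you prefer to create this file from the shell, here is a minimal sketch (assuming a bash shell):

# Write the one-line R script to a file called my_r_script.R
echo 'preprocessCore::colSummarizeAvgLog' > my_r_script.R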

4. Use the Singularity container on an R script

+

Run the container on the R script:

+
./my_container.sif my_r_script.R
+
+
+What does that look like? +

Your output will be similar to this:

+
sven@sven-N141CU:~/temp$ ./my_container.sif my_r_script.R
+function (y) 
+{
+    if (!is.matrix(y)) 
+        stop("argument should be matrix")
+    if (!is.double(y) & is.numeric(y)) 
+        y <- matrix(as.double(y), dim(y)[1], dim(y)[2])
+    else if (!is.numeric(y)) 
+        stop("argument should be numeric matrix")
+    .Call("R_colSummarize_avg_log", y, PACKAGE = "preprocessCore")
+}
+<bytecode: 0x62d460a4d470>
+<environment: namespace:preprocessCore>
+
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container_from_a_singularity_script/index.html b/software/create_singularity_container_from_a_singularity_script/index.html new file mode 100644 index 000000000..58a65e847 --- /dev/null +++ b/software/create_singularity_container_from_a_singularity_script/index.html @@ -0,0 +1,3126 @@ + + + + + + + + + + + + + + + + + + + Create a Singularity container from a Singularity script - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create a Singularity container from a Singularity script

+

There are multiple ways to create a Singularity container.

+

This page shows how to create a Singularity container from a Singularity script.

+

These are the procedures:

+ + + + + + + + + + + + + + + + + +
Procedure | Description
using a website | Easiest for Mac and Windows users
using a computer with Linux where you have super-user rights | Harder for Mac and Windows users
+

Note that users have no super-user rights on our UPPMAX clusters.
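If you are unsure whether you have super-user rights on your own computer, a quick check is to run a harmless command with sudo (a sketch; any command requiring super-user rights would do):

# If this succeeds (possibly after asking for your password), you have super-user rights
sudo true && echo "You have super-user rights"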

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container_from_a_singularity_script_on_linux/index.html b/software/create_singularity_container_from_a_singularity_script_on_linux/index.html new file mode 100644 index 000000000..042be8349 --- /dev/null +++ b/software/create_singularity_container_from_a_singularity_script_on_linux/index.html @@ -0,0 +1,3335 @@ + + + + + + + + + + + + + + + + + + + Create a Singularity container from a Singularity script on a computer with Linux where you have super-user rights - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create a Singularity container from a Singularity script on a computer with Linux where you have super-user rights

+

There are multiple ways to create a Singularity container.

+

This page shows how to do so using a computer with Linux where you have super-user rights.

+

Note that users have no super-user rights on our UPPMAX clusters.

+

Procedure

+

1. Save the script to a Singularity file

+

Save the script as a file called Singularity (this is the recommended filename +for Singularity scripts).

+
+Do you have an example Singularity script? +

Yes! Here is an example Singularity script:

+
Bootstrap: docker
+From: rocker/tidyverse
+
+%post
+    # From https://github.com/brucemoran/Singularity/blob/8eb44591284ffb29056d234c47bf8b1473637805/shub/bases/recipe.CentOs7-R_3.5.2#L21
+    echo 'export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C' >> $SINGULARITY_ENVIRONMENT
+
+    Rscript -e 'install.packages(c("remotes", "devtools"))'
+    Rscript -e 'remotes::install_github("bmbolstad/preprocessCore")'
+
+%runscript
+Rscript "$@"
+
+
+

2. Build the Singularity container

+

Here is how you create a Singularity container called my_container.sif from the Singularity script:

+
sudo singularity build my_container.sif Singularity
+
+

This will build a Singularity container called my_container.sif.

+
+What does that look like? +

Your output will be similar to this:

+
sven@sven-N141CU:~/temp$ sudo singularity build my_container.sif Singularity 
+INFO:    Starting build...
+INFO:    Fetching OCI image...
+307.6MiB / 307.6MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+30.9MiB / 30.9MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+28.2MiB / 28.2MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+261.1MiB / 261.1MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+193.7MiB / 193.7MiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+26.3MiB / 26.3MiB [==================================================================================================================================================] 100 % 0.0 b/s 0s
+288.7KiB / 288.7KiB [================================================================================================================================================] 100 % 0.0 b/s 0s
+INFO:    Extracting OCI image...
+INFO:    Inserting Singularity configuration...
+INFO:    Running post scriptlet
++ echo export LANG=en_US.UTF-8 LANGUAGE=C LC_ALL=C LC_CTYPE=C LC_COLLATE=C  LC_TIME=C LC_MONETARY=C LC_PAPER=C LC_MEASUREMENT=C
++ Rscript -e install.packages(c("remotes", "devtools"))
+Installing packages into ‘/usr/local/lib/R/site-library’
+(as ‘lib’ is unspecified)
+trying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/remotes_2.5.0.tar.gz'
+Content type 'binary/octet-stream' length 436043 bytes (425 KB)
+==================================================
+downloaded 425 KB
+
+trying URL 'https://p3m.dev/cran/__linux__/jammy/latest/src/contrib/devtools_2.4.5.tar.gz'
+Content type 'binary/octet-stream' length 435688 bytes (425 KB)
+==================================================
+downloaded 425 KB
+
+* installing *binary* package ‘remotes’ ...
+* DONE (remotes)
+* installing *binary* package ‘devtools’ ...
+* DONE (devtools)
+
+The downloaded source packages are in
+ ‘/tmp/Rtmpow1CFQ/downloaded_packages’
++ Rscript -e remotes::install_github("bmbolstad/preprocessCore")
+Downloading GitHub repo bmbolstad/preprocessCore@HEAD
+── R CMD build ────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────
+  checking for file ‘/tmp/Rtmpx5C1XE/remotes13a1238df5bce/bmbolstad-preprocessCore-33ccbd9/DESCRIPTION’ (337ms)
+  preparing ‘preprocessCore’:
+  checking DESCRIPTION meta-information ...
+  cleaning src
+  running ‘cleanup’
+  checking for LF line-endings in source and make files and shell scripts
+  checking for empty or unneeded directories
+  building ‘preprocessCore_1.61.0.tar.gz’
+
+Installing package into ‘/usr/local/lib/R/site-library’
+(as ‘lib’ is unspecified)
+* installing *source* package ‘preprocessCore’ ...
+** using staged installation
+'config' variable 'CPP' is defunct
+checking for gcc... gcc
+checking whether the C compiler works... yes
+checking for C compiler default output file name... a.out
+checking for suffix of executables... 
+checking whether we are cross compiling... no
+checking for suffix of object files... o
+checking whether we are using the GNU C compiler... yes
+checking whether gcc accepts -g... yes
+checking for gcc option to accept ISO C89... none needed
+checking how to run the C preprocessor... gcc -E
+checking for library containing pthread_create... none required
+checking for grep that handles long lines and -e... /usr/bin/grep
+checking for egrep... /usr/bin/grep -E
+checking for ANSI C header files... yes
+checking for sys/types.h... yes
+checking for sys/stat.h... yes
+checking for stdlib.h... yes
+checking for string.h... yes
+checking for memory.h... yes
+checking for strings.h... yes
+checking for inttypes.h... yes
+checking for stdint.h... yes
+checking for unistd.h... yes
+checking for stdlib.h... (cached) yes
+checking if PTHREAD_STACK_MIN is defined... yes
+checking if R is using flexiblas... flexiblas not found. preprocessCore threading will not be disabled
+configure: Enabling threading for preprocessCore
+configure: creating ./config.status
+config.status: creating src/Makevars
+** libs
+using C compiler: ‘gcc (Ubuntu 11.4.0-1ubuntu1~22.04) 11.4.0’
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_colSummarize.c -o R_colSummarize.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmd_interfaces.c -o R_plmd_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_plmr_interfaces.c -o R_plmr_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_rlm_interfaces.c -o R_rlm_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subColSummarize.c -o R_subColSummarize.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c R_subrcModel_interfaces.c -o R_subrcModel_interfaces.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg.c -o avg.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c avg_log.c -o avg_log.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c biweight.c -o biweight.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c init_package.c -o init_package.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c lm.c -o lm.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_avg.c -o log_avg.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c log_median.c -o log_median.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c matrix_functions.c -o matrix_functions.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median.c -o median.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c median_log.c -o median_log.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c medianpolish.c -o medianpolish.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmd.c -o plmd.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c plmr.c -o plmr.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c psi_fns.c -o psi_fns.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c qnorm.c -o qnorm.o
+qnorm.c: In function ‘qnorm_c_l’:
+qnorm.c:595:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+  595 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+  596 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c:616:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+  616 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+  617 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c: In function ‘qnorm_c_determine_target_l’:
+qnorm.c:2004:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+ 2004 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+ 2005 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+qnorm.c: In function ‘qnorm_c_determine_target_via_subset_l’:
+qnorm.c:2604:63: warning: format ‘%d’ expects argument of type ‘int’, but argument 2 has type ‘size_t’ {aka ‘long unsigned int’} [-Wformat=]
+ 2604 |          error("ERROR; return code from pthread_join(thread #%d) is %d, exit status for thread was %d\n",
+      |                                                              ~^
+      |                                                               |
+      |                                                               int
+      |                                                              %ld
+ 2605 |                i, returnCode, *((int *) status));
+      |                ~                                               
+      |                |
+      |                size_t {aka long unsigned int}
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm.c -o rlm.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_anova.c -o rlm_anova.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rlm_se.c -o rlm_se.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_background4.c -o rma_background4.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c rma_common.c -o rma_common.o
+gcc -I"/usr/local/lib/R/include" -DNDEBUG -I/usr/local/include  -I/usr/local/include   -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -DPACKAGE_NAME=\"\" -DPACKAGE_TARNAME=\"\" -DPACKAGE_VERSION=\"\" -DPACKAGE_STRING=\"\" -DPACKAGE_BUGREPORT=\"\" -DPACKAGE_URL=\"\" -DSTDC_HEADERS=1 -DHAVE_SYS_TYPES_H=1 -DHAVE_SYS_STAT_H=1 -DHAVE_STDLIB_H=1 -DHAVE_STRING_H=1 -DHAVE_MEMORY_H=1 -DHAVE_STRINGS_H=1 -DHAVE_INTTYPES_H=1 -DHAVE_STDINT_H=1 -DHAVE_UNISTD_H=1 -DHAVE_STDLIB_H=1 -DUSE_PTHREADS=1 -fpic  -g -O2 -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -g  -c weightedkerneldensity.c -o weightedkerneldensity.o
+gcc -shared -L/usr/local/lib/R/lib -L/usr/local/lib -o preprocessCore.so R_colSummarize.o R_plmd_interfaces.o R_plmr_interfaces.o R_rlm_interfaces.o R_subColSummarize.o R_subrcModel_interfaces.o avg.o avg_log.o biweight.o init_package.o lm.o log_avg.o log_median.o matrix_functions.o median.o median_log.o medianpolish.o plmd.o plmr.o psi_fns.o qnorm.o rlm.o rlm_anova.o rlm_se.o rma_background4.o rma_common.o weightedkerneldensity.o -llapack -lblas -lgfortran -lm -lquadmath -L/usr/local/lib/R/lib -lR
+installing to /usr/local/lib/R/site-library/00LOCK-preprocessCore/00new/preprocessCore/libs
+** R
+** inst
+** byte-compile and prepare package for lazy loading
+** help
+*** installing help indices
+** building package indices
+** testing if installed package can be loaded from temporary location
+** checking absolute paths in shared objects and dynamic libraries
+** testing if installed package can be loaded from final location
+** testing if installed package keeps a record of temporary installation path
+* DONE (preprocessCore)
+INFO:    Adding runscript
+INFO:    Creating SIF file...
+INFO:    Build complete: my_container.sif
+
+
+

3. Use the container

+

How to use a container depends on what it does.

+

Here are some things to try:

+

Run the container without arguments, in the hope of getting a clear error message with instructions:

+
./my_container.sif
+
+

Run the container in the hope of seeing its documentation:

+
./my_container.sif --help
+
+

Run the container on the local folder, in the hope of getting a clear error message with instructions:

+
./my_container.sif .
+
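If none of these help, you can also inspect the container's runscript to see what the container does when run:

singularity inspect --runscript my_container.sif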
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container_from_a_singularity_script_using_remote_builder/index.html b/software/create_singularity_container_from_a_singularity_script_using_remote_builder/index.html new file mode 100644 index 000000000..b2f1c238b --- /dev/null +++ b/software/create_singularity_container_from_a_singularity_script_using_remote_builder/index.html @@ -0,0 +1,3243 @@ + + + + + + + + + + + + + + + + + + + Create a Singularity container from a Singularity script using a website - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create a Singularity container from a Singularity script using a website

+

There are multiple ways to create a Singularity container.

+

This page shows how to do so using a website.

+

Procedure

+

1. Go to the Sylabs website

+

Go to the Sylabs website.

+
+What does that look like? +

The Sylabs website looks similar to this:

+

The Sylabs website

+
+

2. Go to the Sylabs Singularity Container Services website

+

On the Sylabs website, +click 'Products | Singularity Container Services'.

+
+Where to click? +

Click here:

+

The Sylabs website

+
+

You will be taken to the 'Singularity Container Services' website.

+
+What does that look like? +

The Singularity Container Services website looks similar to this:

+

The Singularity Container Services website

+
+

3. Sign in or sign up

+

At the 'Singularity Container Services' website, click 'Sign Up' or 'Sign In'.

+
+What does signing in look like? +

Signing in looks similar to this:

+

Signing in to the Singularity Container Services

+
+

You are now logged in to the 'Singularity Container Services':

+
+What does that look like? +

The Singularity Container Services looks similar to this after logging in:

+

Logged in

+
+

4. Go to the remote builder

+

Click on 'Remote builder'.

+
+Where to click? +

Click here:

+

Here is where you can click on 'Remote builder'

+
+

5. Set up the remote builder

+

The remote builder shows a Singularity script and some default settings.

+
+What does that look like? +

The remote builder's default settings look similar to this:

+

The remote builder's default settings

+
+

Make the following changes:

+
    +
  • paste your Singularity script in the text box
  • +
  • change 'Repository' to a valid name (as indicated), for example default/my_container
  • +
+
+What does that look like? +

The remote builder with modified values looks similar to this:

+

Filled in values

+
+

6. Let the container be built

+

Click 'Submit Build'.

+
+Where to click? +

Click here:

+

Click 'Submit Build'

+
+

The build will start.

+
+What does that look like? +

A build that has just started looks similar to this:

+

Building in progress

+
+

After a while, the build will be done.

+
+What does that look like? +

A build that has finished looks similar to this:

+

Building done

+
+

7. Download the container

+

There are multiple ways to download your Singularity container:

+
    +
  • Download from the website: click on 'View image', + then scroll down and click 'Download'
  • +
+
+What does that look like? +

Click on 'View image' here:

+

Click on 'View image'

+

The 'View image' page looks similar to this:

+

View image

+

At the 'View image' page, scroll down to find the 'Download' button:

+

View image and click on Download

+
+
    +
  • Use singularity pull
  • +
+

For example:

+
singularity pull library://sven/default/my_container
+
+
+What does that look like? +

For example:

+
$ singularity pull library://pontus/default/sortmerna:3.0.3
+WARNING: Authentication token file not found : Only pulls of public images will succeed
+INFO:    Downloading library image
+ 65.02 MiB / 65.02 MiB [=========================================================================================================================================] 100.00% 30.61 MiB/s 2s
+
+
+

8. Use the container

+

How to use a container depends on what it does.

+

Here are some things to try:

+

Run the container without arguments, in the hope of getting a clear error message with instructions:

+
./my_container.sif
+
+

Run the container in the hope of seeing its documentation:

+
./my_container.sif --help
+
+

Run the container on the local folder, in the hope of getting a clear error message with instructions:

+
./my_container.sif .
+
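If the downloaded container file is not executable, you can also run it via the singularity command itself:

singularity run my_container.sif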
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/index.html b/software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/index.html new file mode 100644 index 000000000..cfb560dfe --- /dev/null +++ b/software/create_singularity_container_from_a_singularity_script_using_remote_builder_from_rackham/index.html @@ -0,0 +1,3184 @@ + + + + + + + + + + + + + + + + + + + Create a Singularity container from a Singularity script using a remote build from Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create a Singularity container from a Singularity script using a remote build from Rackham

+

There are multiple ways to create a Singularity container.

+

This page shows how to do so using a remote build from Rackham.

+

Building images on Rackham

+

On Rackham, the singularity capabilities are instead provided by Apptainer. The differences are beyond the scope of this material, but you can safely assume you are working with Singularity. Apptainer also allows you to build containers without sudo/administrative rights. In most cases, you can simply start building directly without sudo, e.g. singularity build myimage.img examples/ubuntu.def. Here are some precautions that will allow you to safely build images on Rackham.

+
# Change to fit your account
+PRJ_DIR=/crex/uppmax2022-0-00
+
+# Singularity
+export SINGULARITY_CACHEDIR=${PRJ_DIR}/nobackup/SINGULARITY_CACHEDIR
+export SINGULARITY_TMPDIR=${PRJ_DIR}/nobackup/SINGULARITY_TMPDIR
+mkdir -p $SINGULARITY_CACHEDIR $SINGULARITY_TMPDIR
+
+# Apptainer
+export APPTAINER_CACHEDIR=${PRJ_DIR}/nobackup/SINGULARITY_CACHEDIR
+export APPTAINER_TMPDIR=${PRJ_DIR}/nobackup/SINGULARITY_TMPDIR
+mkdir -p $APPTAINER_CACHEDIR $APPTAINER_TMPDIR
+
+# Disabling cache completely - perfect when you only need to pull containers
+# export SINGULARITY_DISABLE_CACHE=true
+# export APPTAINER_DISABLE_CACHE=true
+
+

Procedure

+

The remote builder service provided by Sylabs also supports remote builds through an API. This means you can call on it from the shell at UPPMAX.

+

Using this service also requires you to register/log in to the Sylabs cloud service. To use this, simply run

+
singularity remote login SylabsCloud
+
+

and you should see

+
Generate an API Key at https://cloud.sylabs.io/auth/tokens, and paste here:
+API Key:
+
+

If you visit that link and give a name, a text token will be created for you. Copy and paste it at the prompt at UPPMAX. You should see

+
INFO: API Key Verified!
+
+

Once you've done this, you can go on and build images almost as normal, using commands like

+
singularity build --remote testcontainer.sif testdefinition.def
+
+

which will build the container from testdefinition.def remotely and transfer it to your directory, storing it as testcontainer.sif.

+
+Could you give an example script? +

A sample job script for running a tool provided in a container may look like

+
#!/bin/bash -l
+#SBATCH -N 1
+#SBATCH -n 1
+#SBATCH -t 0:30:00
+#SBATCH -A your-project
+#SBATCH -p core
+cd /proj/something/containers
+
+singularity exec ./ubuntu.img echo "Hey, I'm running ubuntu"
+singularity exec ./ubuntu.img lsb_release -a
+singularity run ./anotherimage some parameters here
+./yetanotherimage parameters
+
+
diff --git a/software/create_singularity_container_from_conda/index.html b/software/create_singularity_container_from_conda/index.html

Create a Singularity container from conda

+

There are multiple ways to create a Singularity container.

+

This page shows how to create a Singularity container from a Singularity +script that uses conda.

+

As an example, we use a script that builds qiime2:

+
BootStrap: library
+From: centos:7
+
+%runscript
+  . /miniconda/etc/profile.d/conda.sh
+  PATH=$PATH:/miniconda/bin
+  conda activate qiime2-2019.7
+  qiime "$@"
+
+%post
+  yum clean all
+  yum -y update
+  yum -y install wget python-devel
+  cd /tmp
+  wget https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh
+  bash ./Miniconda2-latest-Linux-x86_64.sh -b -p /miniconda
+  /miniconda/bin/conda update -y conda
+  wget https://data.qiime2.org/distro/core/qiime2-2019.7-py36-linux-conda.yml
+  /miniconda/bin/conda env create -n qiime2-2019.7 --file qiime2-2019.7-py36-linux-conda.yml
+  # OPTIONAL CLEANUP
+  rm qiime2-2019.7-py36-linux-conda.yml
+  /miniconda/bin/conda clean -a
+
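
Assuming the definition above is saved as, say, qiime2.def (a filename chosen here purely for illustration), building and running the container could look like:

sudo singularity build qiime2.sif qiime2.def
+./qiime2.sif --help
+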
diff --git a/software/create_singularity_container_from_docker_pull/index.html b/software/create_singularity_container_from_docker_pull/index.html

Create a Singularity container from a Docker pull

+

There are multiple ways to create a Singularity container.

+

This page shows how to create a Singularity container from a Docker pull, +such as this one (from here)

+
docker pull lycheeverse/lychee
+
+

Procedure

+
+Prefer a video? +

You can see the procedure below in the video Create a Singularity container from docker pull.

+
+

The hardest part of this procedure may be to have +Linux with Singularity installed on a computer where you have +super-user rights.

+

In this example, we create a Singularity container +for lychee, +a tool to check for broken links in text files.

+

1. Create the Singularity container

+

Here we build a Singularity container from a Docker file:

+
sudo singularity build my_container.sif [location to Docker file]
+
+

The magic is in [location to Docker file].

+

In our case, we have seen the documentation state the command docker pull lycheeverse/lychee +to install this Docker container. Using a docker pull like this means that +the Docker script is on Docker Hub. +And yes, our Docker script is on Docker Hub!

+

To build a Singularity container from a Docker file on Docker Hub, do:

+
sudo singularity build my_container.sif docker:lycheeverse/lychee
+
+

2. Use the Singularity container

+
./my_container.sif [your command-line arguments]
+
+

For example, in this case:

+
./my_container.sif .
+
+

The . means 'in this folder'.

+

In this example, we have created a Singularity container +for lychee, +a tool to check for broken links in text files. +Hence, the full command can be read as +'Check all files in this folder for broken links'.

diff --git a/software/create_singularity_container_from_dockerhub/index.html b/software/create_singularity_container_from_dockerhub/index.html

Create a Singularity container from Docker Hub

+

There are multiple ways to create a Singularity container.

+

This page shows how to create a Singularity container from a Docker script on Docker Hub.

+

Procedure

+

The hardest part of this procedure may be to have +Linux with Singularity installed on a computer where you have +super-user rights.

+

In this example, we create a Singularity container +for https://github.com/lindenb/jvarkit +with a Docker Hub script at https://hub.docker.com/r/lindenb/jvarkit.

+

1. Create the Singularity container

+

Here we build a Singularity container from a Docker file:

+
sudo singularity build my_container.sif docker:[owner/file]
+
+

The magic is in docker:[owner/file], which for us +becomes docker:lindenb/jvarkit:

+
sudo singularity build my_container.sif docker:lindenb/jvarkit
+
+

In some cases, the Singularity container is now created.

+
+What does that look like? +
$ sudo singularity build my_container.sif docker:lindenb/jvarkit
+INFO:    Starting build...
+INFO:    Fetching OCI image...
+28.2MiB / 28.2MiB [================================================================================================================================================] 100 % 2.5 MiB/s 0s
+1.0GiB / 1.0GiB [==================================================================================================================================================] 100 % 2.5 MiB/s 0s
+INFO:    Extracting OCI image...
+INFO:    Inserting Singularity configuration...
+INFO:    Creating SIF file...
+INFO:    Build complete: my_container.sif
+
+
+

1.1 Troubleshooting

+

In our case, however, we get the MANIFEST_UNKNOWN error:

+
[sudo] password for sven: 
+INFO:    Starting build...
+INFO:    Fetching OCI image...
+FATAL:   While performing build: conveyor failed to get: GET https://index.docker.io/v2/lindenb/jvarkit/manifests/latest: MANIFEST_UNKNOWN: manifest unknown; unknown tag=latest
+
+

This means that Docker Hub cannot determine which Docker script we want to use exactly. +To solve this, we need to find a tag that allows us to find an exact script. +On Docker Hub, we can find the tags for our Docker script at https://hub.docker.com/r/lindenb/jvarkit/tags.

+
+What does that page look like? +

Here is how https://hub.docker.com/r/lindenb/jvarkit/tags looks:

+

jvarkit tags

+
+

We can see there that 1b2aedf24 is the tag for the latest version.

+
sudo singularity build my_container.sif docker:lindenb/jvarkit:1b2aedf24
+
+
+What does that look like? +
$ sudo singularity build my_container.sif docker:lindenb/jvarkit:1b2aedf24
+INFO:    Starting build...
+INFO:    Fetching OCI image...
+28.2MiB / 28.2MiB [================================================================================================================================================] 100 % 2.5 MiB/s 0s
+1.0GiB / 1.0GiB [==================================================================================================================================================] 100 % 2.5 MiB/s 0s
+INFO:    Extracting OCI image...
+INFO:    Inserting Singularity configuration...
+INFO:    Creating SIF file...
+INFO:    Build complete: my_container.sif
+
+
+

Works!

+

2. Use the Singularity container

+
./my_container.sif [your command-line arguments]
+
+

For example, in this case:

+
./my_container.sif --help
+
+

However, this container is set up differently. +From the documentation, one finds that this container is used as such:

+
./jvarkit.sif java -jar /opt/jvarkit/dist/jvarkit.jar --help
+
diff --git a/software/darsync/index.html b/software/darsync/index.html

Darsync

+

Darsync is a tool used to prepare +your project for transfer to +Dardel. +It has two modes: check mode, where it goes through your files, +looks for uncompressed file formats, and counts the number of files; +and gen mode, where it generates a script file you can submit +to Slurm to do the actual data transfer.

+

The idea is to

+
  1. Run the check mode and mitigate any problems it finds.
  2. Run the gen mode.
  3. Submit the generated script as a job.
flowchart TD
+  check[Check files]
+  generate[Generate script for transferring files safely]
+  submit[Submit script]
+
+  check --> |no errors| generate
+  check --> |errors that need fixing| check
+  generate --> |no errors| submit
+
+

The Darsync workflow

+
+
+

Temporarily add to the PATH

+

Until the darsync script is added to the /sw/uppmax/bin folder +you will have to add its location to your PATH variable manually:

+
export PATH=$PATH:/proj/staff/dahlo/testarea/darsync
+
+
+

TLDR

+

If you know your way around Linux, here is the short version.

+
# run check
+darsync check -l /path/to/dir
+
+# fix warnings on your own
+
+# book a 30 day single core job on Snowy and run the rsync command
+rsync -e "ssh -i ~/.ssh/id_rsa" -acPuv /local/path/to/files/ username@dardel.pdc.kth.se:/remote/path/to/files/
+
+
+What does that look like? +

Running the temporary export gives no output:

+
[sven@rackham4 ~]$ export PATH=$PATH:/proj/staff/dahlo/testarea/darsync
+
+

The folder GitHubs is a folder containing multiple GitHub repositories +and is chosen as the test subject:

+
[sven@rackham4 ~]$ darsync check -l GitHubs/
+
+
+   ____ _   _ _____ ____ _  __
+  / ___| | | | ____/ ___| |/ /
+ | |   | |_| |  _|| |   | ' /
+ | |___|  _  | |__| |___| . \
+  \____|_| |_|_____\____|_|\_\
+
+The check module of this script will recursivly go through
+all the files in, and under, the folder you specify to see if there
+are any improvments you can to do save space and speed up the data transfer.
+
+It will look for file formats that are uncompressed, like fasta and vcf files
+(most uncompressed file formats have compressed variants of them that only
+take up 25% of the space of the uncompressed file).
+
+If you have many small files, e.g. folders with 100 000 or more files,
+it will slow down the data transfer since there is an overhead cost per file
+you want to transfer. Large folders like this can be archived/packed into
+a single file to speed things up.
+GitHubs/git/scripts
+
+
+Checking completed. Unless you got any warning messages above you should be good to go.
+
+Generate a Slurm script file to do the transfer by running this script again, but use the 'gen' option this time.
+See the help message for details, or continue reading the user guide for examples on how to run it.
+https://
+
+darsync gen -h
+
+A file containing file ownership information,
+darsync_GitHubs.ownership.gz
+has been created. This file can be used to make sure that the
+file ownership (user/group) will look the same on Dardel as it does here. See https:// for more info about this.
+
+
+NBIS staff test project code +

Follow the project application procedure as +described here. +Request permission to join project NAISS 2023/22-1027

+
+
+

Check mode

+

To initiate the check mode you run Darsync with the check argument. If you run it without any other arguments it will ask you interactive questions to get the information it needs.

+
# interactive mode
+darsync check
+
+# or give it the path to the directory to check directly
+darsync check -l /path/to/dir
+
+

The warnings you can get are:

+

Too many uncompressed files

+

It looks for files with file endings matching common uncompressed file formats, like .fq, .sam, .vcf, .txt. If the combined file size of these files is above a threshold it will trigger the warning. Most programs that use these formats can also read the compressed version of them.

+

Examples of how to compress common formats:

+
# fastq/fq/fasta/txt
+gzip file.fq
+
+# vcf
+bgzip file.vcf
+
+# sam
+samtools view -b file.sam > file.bam
+# when the above command is completed successfully:
+# rm file.sam
+
+

For examples of how to compress other file formats, use an internet search engine to look for

+
how to compress <insert file format name> file
+
+

Too many files

+

If a project consists of many small files it will decrease the data transfer speed, as there is an overhead cost to starting and stopping each file transfer. A way around this is to pack all the small files into a single tar archive, so that it only has to start and stop a single time.

+

Example of how to pack a folder and all files in it into a single tar archive.

+
# pack it
+tar -czvf folder.tar.gz /path/to/folder
+
+# unpack it after transfer
+tar -xzvf folder.tar.gz
+
+

Once you have mitigated any warnings you got, you are ready to generate the Slurm script that will perform the data transfer.

+

Gen mode

+

To generate a transfer script you will need to supply Darsync with some information. Make sure to have this readily available:

+
  • ID of the UPPMAX project that will run the transfer job, e.g. naiss2099-23-99
  • Path to the folder you want to transfer, e.g. /proj/naiss2099-23-999
    • Either transfer your whole project, or put the files and folders you want to transfer into a new folder in your project folder and transfer that folder.
    • The project's folder on UPPMAX will be located in the /proj/ folder, most likely a folder with the same name as the project's ID, /proj/<project id>, e.g. /proj/naiss2024-23-999. If your project has picked a custom directory name when it was created it will have that name instead of the project ID, e.g. /proj/directory_name. Check which directory name your project has by looking at the project's page in SUPR and look at the field called Directory name:
  • Your Dardel username.
    • You can see your Dardel username in SUPR
  • The path on Dardel where you want to put your data, e.g. /cfs/klemming/projects/snic/naiss2099-23-999
  • The path to the SSH key you have prepared to be used to log in from Rackham to Dardel, e.g. ~/.ssh/id_rsa
    • Check
  • The path to where you want to save the generated transfer script.

To initiate the gen mode you run Darsync with the gen argument. If you run it without any other arguments it will ask you interactive questions to get the information it needs.

+
# interactive mode
+darsync gen
+
+
+# or give it any or all arguments directly
+darsync gen -l /path/to/dir/on/uppmax/ -r /path/to/dir/on/dardel/ -A naiss2099-23-99 -u dardel_username -s ~/.ssh/id_rsa -o ~/dardel_transfer_script.sh
+
+

Starting the transfer

+

Before you submit the generated transfer script you should make sure everything is in order. You can try to run the transfer script directly on the UPPMAX login node and see if it starts or if you get any errors:

+
bash ~/dardel_transfer_script.sh
+
+

If you start to see progress reports from rsync, you know it works, and you can press ctrl+c to stop.

+

Example of how it can look when it works:

+
bash darsync_temp.slurm
+sending incremental file list
+temp/
+temp/counts
+             10 100%    0,51kB/s    0:00:00 (xfr#4, to-chk=72/77)
+temp/export.sh
+             13 100%    0,67kB/s    0:00:00 (xfr#5, to-chk=71/77)
+temp/my_stuff.py
+             70 100%    3,60kB/s    0:00:00 (xfr#7, to-chk=69/77)
+temp/run.sh
+             52 100%    2,67kB/s    0:00:00 (xfr#8, to-chk=68/77)
+temp/sequence_tools.py
+            345 100%   17,73kB/s    0:00:00 (xfr#9, to-chk=67/77)
+temp/similar_sequences.txt
+             24 100%    1,23kB/s    0:00:00 (xfr#10, to-chk=66/77)
+temp/t.py
+            328 100%   16,86kB/s    0:00:00 (xfr#11, to-chk=65/77)
+
+

Example of how it can look when it doesn't work:

+
bash darsync_temp.slurm
+user@dardel.pdc.kth.se: Permission denied (publickey,gssapi-keyex,gssapi-with-mic).
+rsync: connection unexpectedly closed (0 bytes received so far) [sender]
+rsync error: unexplained error (code 255) at io.c(231) [sender=3.2.7]
+
+

Troubleshooting

+

Apart from getting the username or paths wrong, we foresee that the most common problem will be to get the SSH key generated, added to the PDC login portal, and the UPPMAX IP/hostname added as authorized for that SSH key. Please see the PDC user guide on how to set up SSH keys. Once you have your key created and added to the login portal, go to the login portal again and add the address *.uppmax.uu.se to your key to make it work from Rackham.

diff --git a/software/debuggers/index.html b/software/debuggers/index.html

diff --git a/software/directly-from-IG/GAMESS_US.html b/software/directly-from-IG/GAMESS_US.html

GAMESS-US version 20170930 is installed on Rackham. Newer versions can be installed on request to UPPMAX support. Snowy currently lacks GAMESS-US.

+ +

Citing GAMESS papers

+ +

It is essential that you read the GAMESS manual thoroughly to properly reference the papers specified in the instructions. All publications using GAMESS should cite at least the following paper:

+ +
+
+@Article{GAMESS,
+author={M.W.Schmidt and K.K.Baldridge and J.A.Boatz and S.T.Elbert and
+M.S.Gordon and J.H.Jensen and S.Koseki and N.Matsunaga and
+K.A.Nguyen and S.Su and T.L.Windus and M.Dupuis and J.A.Montgomery},
+journal={J.~Comput.~Chem.},
+volume=14,
+pages={1347},
+year=1993,
+comment={The GAMESS program}}
+
+ +

If you need to obtain GAMESS yourself, please visit the GAMESS website for further instructions.

+ +

Running GAMESS

+ +


+Load the module using

+ +

module load gamess/20170930

+ +

+ +

Below is an example submit script for Rackham, running on 40 cores (2 nodes with 20 cores each). It is essential to specify the project name:

+ +
+
+#!/bin/bash -l
+#SBATCH -J jobname
+#SBATCH -p node -n 40
+#SBATCH -A PROJECT
+#SBATCH -t 03:00:00
+ 
+module load gamess/20170930
+ 
+rungms gms >gms.out
+
+ +

Memory specification

+ +

GAMESS uses two kinds of memory: replicated memory and distributed memory. Both kinds of memory should be given in the $SYSTEM specification. Replicated memory is specified using the MWORDS keyword and distributed memory with the MEMDDI keyword. It is very important that you understand the uses of these keywords. Check the GAMESS documentation for further information.

+ +

If your job requires 16 MW (mega-words) of replicated memory and 800 MW of distributed memory, as in the example below, the memory requirement per CPU core varies as 16+800/N, where N is the number of cores. Each word is 8 bytes of memory, so the amount of memory per core is (16+800/N)*8 bytes. The amount of memory per node depends on the number of cores per node. Rackham has 20 cores per node; most nodes have 128 GB of memory, but 30 nodes have 512 GB and 4 nodes have 1 TB.

+ +
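
As a sketch of such a specification, matching the 16 MW / 800 MW figures above (the exact form is an assumption, not copied from the GAMESS manual), the $SYSTEM group could read:

+
+ $SYSTEM MWORDS=16 MEMDDI=800 $END
+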


+Communication

+ +

For intra-node communication, shared memory is used. For inter-node communication, MPI is used, which uses the InfiniBand interconnect.

diff --git a/software/directly-from-IG/gaussian.html b/software/directly-from-IG/gaussian.html new file mode 100644 index 000000000..e1ef79790 --- /dev/null +++ b/software/directly-from-IG/gaussian.html @@ -0,0 +1,251 @@ +

A short guide on how to run g09 on UPPMAX.

+ +

Access to Gaussian 09

+ +

Gaussian 09 is available at UPPMAX. Uppsala University has a university license for all employees. If you want to be able to run g09, email support@uppmax.uu.se and ask to be added to the g09 group.

+ +

Running g09

+ +

In order to run g09 you must first set up the correct environment. You do this with:

+ +
+
+module load gaussian/g09.d01
+
+ +

Running single core jobs in SLURM

+ +

Here is an example of a submit script for SLURM:

+ +
+
+
+#!/bin/bash -l
+#SBATCH -J g09test
+#SBATCH -p core
+#SBATCH -n 1
+#If you ask for a single core in slurm on Rackham you get 6.4 Gb of memory
+#SBATCH -t 1:00:00
+#SBATCH -A your_project_name
+ 
+module load gaussian/g09.d01
+g09 mp2.inp mp2.out
+
+
+ +

If you run a single core job on Rackham you can't use more than 6.4GB of memory.

+ +

When specifying the memory requirements, make sure that you ask for some more memory in the submit-script than in g09 to allow for some memory overhead for the program. As a general rule you should ask for 200MB more than you need in the calculation.

+ +
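
As a worked example of this rule (an illustration, not an additional requirement): with %Mem=800MB, as in the mp2.inp input below, the job needs roughly 800 + 200 = 1000 MB available; a single Rackham core, which comes with 6.4 GB of memory, covers this with a wide margin.
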

The mp2.inp inputfile in the example above:

+ +
+
+
+%Mem=800MB
+#P MP2 aug-cc-pVTZ OPT
+ 
+test
+ 
+0 1
+Li 
+F 1 1.0
+
+
+ +

Scratch space

+ +

The g09 module sets the environment variable GAUSS_SCRDIR to /scratch/$SLURM_JOBID in slurm. These directories are removed after the job is finished.

+ +

If you want to set GAUSS_SCRDIR, you must do it after module load gaussian/g09.d01 in your script.

+ +

If you set GAUSS_SCRDIR to something else in your submit script remember to remove all unwanted files after your job has finished.

+ +

If you think you will use a large amount of scratch space, you might want to set maxdisk in your input file. You can either set maxdisk directly on the command line in your input file:

+ +
+
+
+#P MP2 aug-cc-pVTZ SCF=Tight maxdisk=170GB
+
+
+ +

or you can put something like:

+ +
+
+
+MAXDISK=$( df | awk '/scratch/ { print $4 }' )KB
+sed -i '/^#/ s/ maxdisk=[[:digit:]]*KB//' inputfile
+sed -i '/^#/ s/$/ maxdisk='$MAXDISK'/' inputfile
+
+
+ +

in your scriptfile. This will set maxdisk to the currently available size of the /scratch disk on the node you will run on. Read more on maxdisk in the online manual.

+ +

Running g09 in parallel

+ +

Gaussian can be run in parallel on a single node using shared memory.

+ +

This is the input file for the slurm example below:

+ +

The dimer4.inp input:

+ +
+
+
+%Mem=3800MB
+%NProcShared=4
+#P MP2 aug-cc-pVTZ SCF=Tight
+ 
+methanol dimer MP2
+ 
+0 1
+6 0.754746 -0.733607 -0.191063
+1 -0.033607 -1.456810 -0.395634
+1 1.007890 -0.778160 0.867678
+1 1.635910 -0.998198 -0.774627
+8 0.317192 0.576306 -0.534002
+1 1.033100 1.188210 -0.342355
+6 1.513038 3.469264 0.971885
+1 1.118398 2.910304 1.819367
+1 0.680743 3.818664 0.361783
+1 2.062618 4.333044 1.344537
+8 2.372298 2.640544 0.197416
+1 2.702458 3.161614 -0.539550
+
+
+ +

Running g09 in parallel in slurm

+ +

This can be done by asking for CPUs on the same node using the parallel node environments and telling Gaussian to use several CPUs using the NProcShared link 0 command.

+ +

An example submit-script:

+ +
+
+
+#!/bin/bash -l
+#SBATCH -J g09_4
+#SBATCH -p node -n 8
+#SBATCH -t 1:00:00
+#SBATCH -A your_project_name
+ 
+module load gaussian/g09.d01
+export OMP_NUM_THREADS=1
+ulimit -s $STACKLIMIT
+g09 dimer4.inp dimer4.out
+
+
+ +

Notice that 8 cores are requested from the queue-system using the line #SBATCH -p node -n 8 and that Gaussian is told to use 4 cores with the link 0 command %NProcShared=4

+ +

The example above runs about 1.7 times as fast on eight cores as on four; just change %NProcShared=4 to %NProcShared=8 in the input file.

+ +

Please benchmark your own inputs as the speedup depends heavily on the method and size of system.

+ +

In some cases Gaussian cannot use all the CPUs you ask for. This is indicated in the output with lines looking like this:

+ +

PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.

+ +

The reason for specifying OMP_NUM_THREADS=1 is to avoid using the OpenMP parts of the Gaussian code and to use Gaussian's own threads instead.

+ +

Running g09 in parallel with linda

+ +

In order to run g09 in parallel over several nodes we have acquired Linda TCP.

+ +

Running g09 in parallel with linda in slurm

+ +

This can be done by asking for CPUs on the same node using the parallel node environments and telling Gaussian to use several CPUs using the NProcLinda and NProcShared link 0 command.

+ +

An example submit-script:

+ +
+
+
+#!/bin/bash -l
+#SBATCH -J g09-linda
+#
+#SBATCH -t 2:00:0 
+#
+#SBATCH -p node -n 40
+#SBATCH -A your_project_name
+ 
+module load gaussian/g09.d01
+ulimit -s $STACKLIMIT
+export OMP_NUM_THREADS=1
+ 
+#Next lines are there for linda to know what nodes to run on
+srun hostname -s | sort -u > tsnet.nodes.$SLURM_JOBID
+export GAUSS_LFLAGS='-nodefile tsnet.nodes.'$SLURM_JOBID' -opt "Tsnet.Node.lindarsharg: ssh"'
+ 
+#export GAUSS_SCRDIR=
+time g09 dimer20-2.inp dimer20-2.out
+ 
+rm tsnet.nodes.$SLURM_JOBID
+
+
+ +

Here is the input file:

+ +
+
+
+%NProcLinda=2
+%NProcShared=20
+%Mem=2800MB
+#P MP2 aug-cc-pVTZ SCF=Tight
+ 
+methanol dimer MP2
+ 
+0 1
+6 0.754746 -0.733607 -0.191063
+1 -0.033607 -1.456810 -0.395634
+1 1.007890 -0.778160 0.867678
+1 1.635910 -0.998198 -0.774627
+8 0.317192 0.576306 -0.534002
+1 1.033100 1.188210 -0.342355
+6 1.513038 3.469264 0.971885
+1 1.118398 2.910304 1.819367
+1 0.680743 3.818664 0.361783
+1 2.062618 4.333044 1.344537
+8 2.372298 2.640544 0.197416
+1 2.702458 3.161614 -0.539550
+
+
+ +

Notice that 40 cores are requested from the queue-system using the line #SBATCH -p node -n 40 and that g09 is told to use 2 nodes via linda with the %NProcLinda=2 link 0 command and 20 cores on each node with the link 0 command %NProcShared=20.

+ +

Please benchmark your own inputs as the speedup depends heavily on the method and size of system.

+ +

In some cases Gaussian cannot use all the cpus you ask for. This is indicated in the output with lines looking like this:
+PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.

+ +

Number of CPUs on the shared memory nodes

+ +

Use the information below as a guide to how many CPUs to request for your calculation:

+ +

On Rackham:

+ +
  • 272 nodes with two 10-core CPUs and 128GB memory
  • 32 nodes with two 10-core CPUs and 256GB memory
+ +

On Milou:

+ +
  • 174 nodes with two 8-core CPUs and 128GB memory
  • 17 nodes with two 8-core CPUs and 256GB memory
  • 17 nodes with two 8-core CPUs and 512GB memory
+ +

+ +

Note on chk-files:

+ +

You may experience difficulties if you mix different versions (g09 and g03) or revisions of Gaussian. If you use a checkpoint file (.chk file) from an older revision (say g03 e.01) in a new calculation with revision a.02, g09 may not run properly.

+ +

We recommend using the same revision if you want to restart a calculation or reuse an older checkpoint file.

+ +

diff --git a/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-4-413x252.png b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-4-413x252.png
Binary files /dev/null and b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-4-413x252.png differ
diff --git a/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-6-412x427.png b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-6-412x427.png
Binary files /dev/null and b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-6-412x427.png differ
diff --git a/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-7-400x246.png b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-7-400x246.png
Binary files /dev/null and b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-7-400x246.png differ
diff --git a/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-8-247x209.png b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-8-247x209.png
Binary files /dev/null and b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-8-247x209.png differ
diff --git a/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-9-333x338.png b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-9-333x338.png
Binary files /dev/null and b/software/directly-from-IG/img/c_555128-l_1-k_ddtscreenshot-9-333x338.png differ
diff --git a/software/directly-from-IG/img/index.html b/software/directly-from-IG/img/index.html

Put related images here

diff --git a/software/directly-from-IG/nvidia-deep-learning-frameworks.html b/software/directly-from-IG/nvidia-deep-learning-frameworks.html

Here is how easily one can use an NVIDIA environment for deep learning, with all the following tools preset.

+ +

pytorch

+ +

Pull the container (6.5GB).

+ +
+
+singularity pull docker://nvcr.io/nvidia/pytorch:22.03-py3
+
+
+
+ +

Get an interactive shell.

+ +
+
+singularity shell --nv ~/external_1TB/tmp/pytorch_22.03-py3.sif
+
+Singularity> python3
+Python 3.8.12 | packaged by conda-forge | (default, Jan 30 2022, 23:42:07) 
+[GCC 9.4.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+
+>>> import torch
+# Check torch version
+>>> print(torch.__version__) 
+1.12.0a0+2c916ef
+
+# Check if CUDA is available
+>>> print(torch.cuda.is_available()) 
+True
+
+# Check which GPU architectures are supported
+>>> print(torch.cuda.get_arch_list()) 
+['sm_52', 'sm_60', 'sm_61', 'sm_70', 'sm_75', 'sm_80', 'sm_86', 'compute_86']
+
+# test torch
+>>> torch.zeros(1).to('cuda')
+tensor([0.], device='cuda:0')
+
+
+
+ +

From the container shell, check what else is available...

+ +
+
+Singularity> nvcc -V
+nvcc: NVIDIA (R) Cuda compiler driver
+Copyright (c) 2005-2022 NVIDIA Corporation
+Built on Thu_Feb_10_18:23:41_PST_2022
+Cuda compilation tools, release 11.6, V11.6.112
+Build cuda_11.6.r11.6/compiler.30978841_0
+
+# Check what conda packages are already there
+Singularity> conda list -v
+
+# Start a jupyter-lab (keep in mind the hostname)
+Singularity> jupyter-lab
+...
+[I 13:35:46.270 LabApp] [jupyter_nbextensions_configurator] enabled 0.4.1
+[I 13:35:46.611 LabApp] jupyter_tensorboard extension loaded.
+[I 13:35:46.615 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.8/site-packages/jupyterlab
+[I 13:35:46.615 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab
+[I 13:35:46.616 LabApp] [Jupytext Server Extension] NotebookApp.contents_manager_class is (a subclass of) jupytext.TextFileContentsManager already - OK
+[I 13:35:46.616 LabApp] Serving notebooks from local directory: /home/pmitev
+[I 13:35:46.616 LabApp] Jupyter Notebook 6.4.8 is running at:
+[I 13:35:46.616 LabApp] http://hostname:8888/?token=d6e865a937e527ff5bbccfb3f150480b76566f47eb3808b1
+[I 13:35:46.616 LabApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
+...
+
+
+
+ +

You can use this container as a base image to add more packages.

+ +
+
+Bootstrap: docker
+From: nvcr.io/nvidia/pytorch:22.03-py3
+...
+
+
+
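
For instance, a complete definition file building on this base image could look like the following sketch (the added package is only an illustration):

+
+Bootstrap: docker
+From: nvcr.io/nvidia/pytorch:22.03-py3
+
+%post
+    pip install scikit-learn
+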
+ +

Just keep in mind that "upgrading" the built-in torch package might install a package that is compatible with fewer GPU architectures, and it might no longer work on your hardware.

+ +

+ +
+
+Singularity> python3 -c "import torch; print(torch.__version__); print(torch.cuda.is_available()); print(torch.cuda.get_arch_list()); torch.zeros(1).to('cuda')"
+
+1.10.0+cu102
+True
+['sm_37', 'sm_50', 'sm_60', 'sm_70']
+NVIDIA A100-PCIE-40GB with CUDA capability sm_80 is not compatible with the current PyTorch installation.
+The current PyTorch install supports CUDA capabilities sm_37 sm_50 sm_60 sm_70.
+
+
diff --git a/software/directly-from-IG/openmolcas.html b/software/directly-from-IG/openmolcas.html new file mode 100644 index 000000000..e5c893801 --- /dev/null +++ b/software/directly-from-IG/openmolcas.html @@ -0,0 +1,149 @@ +

How to run the program MOLCAS on UPPMAX

+ +

Information

+ +

MOLCAS is an ab initio computational chemistry program. Focus in the program is placed on methods for calculating general electronic structures in molecular systems in both ground and excited states. MOLCAS is, in particular, designed to study the potential surfaces of excited states.

+ +

This guide will help you get started running MOLCAS on UPPMAX. More detailed information on how to use MOLCAS can be found on the official website.

+ +

Licensing

+ +

A valid license key is required to run MOLCAS on UPPMAX. The license key should be kept in a directory named .Molcas under the home directory.

+ +
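
For example, placing the key could look like this (a sketch; the actual file name of your license key may differ from license.dat):

+
+mkdir -p ~/.Molcas
+cp license.dat ~/.Molcas/
+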

MOLCAS is currently free of charge for academic researchers active in the Nordic countries. You can get hold of a license by following these instructions.

+ +

Versions installed at UPPMAX

+ +

At UPPMAX the following versions are installed:

+ +
  • 8.0 (serial)
  • 7.8 (serial)
+ +

Modules needed to run MOLCAS

+ +

In order to run MOLCAS you must first load the MOLCAS module. You can see all available versions of MOLCAS installed at UPPMAX with:

+ +
+
+
+module avail molcas
+
+
+ +

Load a MOLCAS module with, e.g.:

+ +
+
+
+module load molcas/7.8.082
+
+
+ +

How to run MOLCAS interactively

+ +

If you would like to do tests or short runs, we recommend using the interactive command:

+ +
+
+
+interactive -A your_project_name
+
+
+ +

This will reserve a node for you to do your test on. Note that you must provide the name of an active project in order to run on UPPMAX resources. After a short wait you will get access to the node. Then you can run MOLCAS by:

+ +
+
+
+module load molcas/7.8.082
+molcas -f test000.input
+
+
+ +

The test000.input looks like:

+ +
+
+
+*$Revision: 7.7 $
+************************************************************************
+* Molecule: H2
+* Basis: DZ
+* Symmetry: x y z
+* SCF: conventional
+*
+*  This is a test to be run during first run to verify
+*  that seward and scf works at all
+*
+ 
+>export MOLCAS_PRINT=VERBOSE
+ &GATEWAY
+coord
+2
+angstrom
+H  0.350000000  0.000000000  0.000000000
+H -0.350000000  0.000000000  0.000000000
+basis
+H.DZ....
+ 
+ &SEWARD
+ 
+ &SCF
+Title
+ H2, DZ Basis set
+ 
+ &RASSCF
+Title
+ H2, DZ Basis set
+nActEl
+ 2  0 0
+Ras2
+ 1 1 0 0 0 0 0 0
+ 
+ &ALASKA
+ 
+ &SLAPAF
+ 
+ &CASPT2
+
+
+ +

See the SLURM user guide for more information on the interactive command. Don't forget to exit your interactive job when you have finished your calculation. Exiting will free the resource for others to use.

+ +

Batch scripts for slurm

+ +

It's possible to run MOLCAS in the batch queue. Here is an example running MOLCAS on one core:

+ +
+
+
+#!/bin/bash -l
+#
+#SBATCH -A your_project_name
+#SBATCH -J molcastest
+#SBATCH -t 00:10:00
+#SBATCH -p core -n 1
+ 
+module load molcas/7.8.082
+ 
+#In order to let MOLCAS use more memory
+export MOLCASMEM=2000
+ 
+molcas -f test000.input
+
+
+ +

Again you'll have to provide your project name.

+ +

If the script is called test000.job you can submit it to the batch queue with:

+ +
+
+
+sbatch test000.job
+
+
+ +

This example will not take many seconds to run on e.g. Tintin or Milou.

diff --git a/software/dnabert2/index.html b/software/dnabert2/index.html

DNABERT 2

+

DNABERT 2 is 'a foundation model +trained on large-scale multi-species genome that achieves the +state-of-the-art performance on 28 tasks of the GUE benchmark', +according to DNABERT 2.

+

DNABERT 2 is not part of +the UPPMAX module system.

+
+For UPPMAX staff +

Notes on installing and running DNABERT2 on Rackham and Snowy +can be found here

+
+

Installing DNABERT 2

+

Run dnabert2_install_on_rackham.sh.

+

Running DNABERT 2

+

Run dnabert2_run_on_rackham.sh +with the example Python script dnabert2_example.py.
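
Assuming both scripts and dnabert2_example.py are in your current folder on Rackham, the whole procedure could look like:

bash dnabert2_install_on_rackham.sh
+bash dnabert2_run_on_rackham.sh
+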

diff --git a/software/dnabert2_example.py b/software/dnabert2_example.py
new file mode 100644
index 000000000..a700f991b
--- /dev/null
+++ b/software/dnabert2_example.py
@@ -0,0 +1,22 @@
+import torch
+from transformers import AutoTokenizer, AutoModel
+
+tokenizer = AutoTokenizer.from_pretrained("zhihan1996/DNABERT-2-117M", trust_remote_code=True)
+model = AutoModel.from_pretrained("zhihan1996/DNABERT-2-117M", trust_remote_code=True)
+
+from transformers.models.bert.configuration_bert import BertConfig
+
+config = BertConfig.from_pretrained("zhihan1996/DNABERT-2-117M")
+model = AutoModel.from_pretrained("zhihan1996/DNABERT-2-117M", trust_remote_code=True, config=config)
+
+dna = "ACGTAGCATCGGATCTATCTATCGACACTTGGTTATCGATCTACGAGCATCTCGTTAGC"
+inputs = tokenizer(dna, return_tensors = 'pt')["input_ids"]
+hidden_states = model(inputs)[0] # [1, sequence_length, 768]
+
+# embedding with mean pooling
+embedding_mean = torch.mean(hidden_states[0], dim=0)
+print(embedding_mean.shape) # expect to be 768
+
+# embedding with max pooling
+embedding_max = torch.max(hidden_states[0], dim=0)[0]
+print(embedding_max.shape) # expect to be 768
diff --git a/software/dnabert2_install_on_rackham.sh b/software/dnabert2_install_on_rackham.sh
new file mode 100644
index 000000000..60fb917c7
--- /dev/null
+++ b/software/dnabert2_install_on_rackham.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+#
+# Install DNABERT2 on Rackham
+#
+# Adapted from https://github.com/richelbilderbeek/create_dnabert2_singularity_container/blob/master/install_on_rackham.sh
+
+# Clones DNABERT 2 in a folder, then installs it
+
+module load python/3.8.7
+git clone https://github.com/MAGICS-LAB/DNABERT_2
+cd DNABERT_2
+python3 -m pip install -r requirements.txt
+cd ..
+
+pip uninstall -y triton
diff --git a/software/dnabert2_run_on_rackham.sh b/software/dnabert2_run_on_rackham.sh
new file mode 100644
index 000000000..892509c18
--- /dev/null
+++ b/software/dnabert2_run_on_rackham.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+# Run DNABERT2 example code on Rackham
+#
+# Adapted from https://github.com/richelbilderbeek/create_dnabert2_singularity_container/blob/master/run_on_rackham.sh
+
+module load python/3.8.7
+python dnabert2_example.py
diff --git a/software/doc/index.html b/software/doc/index.html

Software-specific documentation

diff --git a/software/emacs/index.html b/software/emacs/index.html

Emacs

+

UPPMAX has multiple text editors available. +This page describes the Emacs text editor.

+

Emacs is an advanced terminal editor that is fast and powerful, once you learn it.

+

Examples of how to use Emacs

+

Start emacs on a terminal with:

+
emacs
+
+

Start emacs to edit a file:

+
emacs filename
+
+

Start Emacs keeping you in your terminal window:

+
emacs -nw
+
+

Do the editing you want, then save with:

+
Control-x, Control-s
+
+

Exit emacs with:

+
Control-x, Control-c
+
+

You can read a tutorial in emacs by doing:

+
Control-h t
+
diff --git a/software/eog/index.html b/software/eog/index.html

eog

+

Example use of eog

+

eog is a tool to view images on an UPPMAX cluster.

+

To be able to see the images, +either use SSH with X-forwarding +or log in to a remote desktop.

+
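
For example, X-forwarding can be enabled when logging in (a sketch; replace the username with your own):

ssh -X sven@rackham.uppmax.uu.se
+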

Usage:

+
eog [filename]
+
+

for example:

+
eog my.png
+
+
+Need an example image to work with? +

In the terminal, do:

+
convert -size 32x32 xc:transparent my.png
+
+

This will create an empty PNG image.

+
+
+What does this look like? +

Example use of eog

+
diff --git a/software/files/matlab/uppsala.Desktop.zip b/software/files/matlab/uppsala.Desktop.zip
Binary files /dev/null and b/software/files/matlab/uppsala.Desktop.zip differ
diff --git a/software/filezilla/index.html b/software/filezilla/index.html

diff --git a/software/finishedjobinfo/index.html b/software/finishedjobinfo/index.html

finishedjobinfo

+

finishedjobinfo shows information on jobs that have finished, +which is useful to help optimize Slurm jobs.

+

Usage

+
finishedjobinfo
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ finishedjobinfo
+2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15
+2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56
+2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00
+2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00
+2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14
+2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56
+2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15
+2024-10-08 00:00:43 jobid=50661471 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r441 procs=1 partition=core qos=normal jobname=P8913_277.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r441 timelimit=12:00:00 submit_time=2024-10-07T21:07:30 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:43 runtime=02:49:18 margin=09:10:42 queuetime=00:03:55
+2024-10-08 00:00:58 jobid=50661227 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r387 procs=1 partition=core qos=normal jobname=P8913_264.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r387 timelimit=12:00:00 submit_time=2024-10-07T21:07:24 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:58 runtime=02:49:35 margin=09:10:25 queuetime=00:03:59
+2024-10-08 00:01:00 jobid=50661458 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:01:00 runtime=02:49:35 margin=09:10:25 queuetime=00:03:56
+
+
+

Show the help

+

To show the help of finishedjobinfo, in a terminal, do:

+
finishedjobinfo -h
+
+
+What does that look like? +

[sven@rackham3 ~]$ finishedjobinfo -h
+Usage: finishedjobinfo [-h] [-M cluster_name] [-j jobid[,jobid...]] [-m|-y|-s YYYY-MM-DD[/hh:mm:ss]] [-e YYYY-MM-DD[/hh:mm:ss]] [project_or_user]...
+ -h Ask for help
+ -M Request data from a named other cluster
+ -j Request data for a specific jobid or jobids (comma-separated)
+ -q Quiet, quick, abbreviated output (no QOS or memory information)
+ -v Verbose, tells a little more
+ -m Start time is start of this month
+ -y Start time is start of this year
+ -s Request a start time (default is a month back in time)
+ -e Request an end time (default is now)
+ Time can also be specified as NOW, TODAY, YYYY, YYYY-MM, YYYY-w, w, hh:mm:ss, or name of month
+
+Meaning of jobstate:
+CANCELLED Job was cancelled, before or after it had started
+COMPLETED Job run to finish, last command gave exit code 0
+FAILED Job crashed or at least ended with an exit code that was not 0
+NODE_FAIL One of your job nodes experienced a major problem, perhaps your job used all available memory
+TIMEOUT Job exceeded the specified timelimit and was therefore terminated

+
+

Show the information about a specific job

+

Use finishedjobinfo -j [job_number] to get information about a specific +job, where [job_number] is the job number, +for example finishedjobinfo -j 44981366.

+
+What does that look like? +

Here is an example output:

+
[sven@rackham3 ~]$ finishedjobinfo -j 44981366
+2024-02-09 12:30:37 jobid=44981366 jobstate=TIMEOUT username=sven account=staff nodes=r35 procs=1 partition=core qos=normal jobname=run_beast2.sh maxmemory_in_GiB=0.1 maxmemory_node=r35 timelimit=00:01:00 submit_time=2024-02-09T12:27:29 start_time=2024-02-09T12:29:18 end_time=2024-02-09T12:30:37 runtime=00:01:19 margin=-00:00:19 queuetime=00:01:49
+
+
+


+
+
+

How do I find jobs that have finished and took longer than an hour and less than a day?

+
finishedjobinfo | grep "runtime.[0-9][1-9]"
+
+

Press CTRL-C to stop the process: it will take very long to finish.

+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ finishedjobinfo | grep "runtime.[0-9][1-9]"
+2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15
+2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56
+2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00
+2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00
+2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14
+2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56
+2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15
+
+

This output took around 1 second to produce.

+
+

How do I find jobs that have finished and took longer than an hour?

+
finishedjobinfo | grep -E "runtime.([0-9]-)?[0-9][1-9]"
+
+

Press CTRL-C to stop the process: it will take very long to finish.

+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham1 ~]$ finishedjobinfo | grep -E "runtime.([0-9]-)?[0-9][1-9]"
+2024-10-08 00:00:01 jobid=50661814 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r65 procs=1 partition=core qos=normal jobname=P8913_295.chr12 maxmemory_in_GiB=2.1 maxmemory_node=r65 timelimit=12:00:00 submit_time=2024-10-07T21:07:37 start_time=2024-10-07T21:15:52 end_time=2024-10-08T00:00:01 runtime=02:44:09 margin=09:15:51 queuetime=00:08:15
+2024-10-08 00:00:09 jobid=50661456 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r437 procs=1 partition=core qos=normal jobname=P8913_276.chr16 maxmemory_in_GiB=2.1 maxmemory_node=r437 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:09 runtime=02:48:44 margin=09:11:16 queuetime=00:03:56
+2024-10-08 00:00:13 jobid=50661186 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r349 procs=1 partition=core qos=normal jobname=P8913_262.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r349 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:13 runtime=02:48:50 margin=09:11:10 queuetime=00:04:00
+2024-10-08 00:00:19 jobid=50661172 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r344 procs=1 partition=core qos=normal jobname=P8913_261.chr18 maxmemory_in_GiB=2.1 maxmemory_node=r344 timelimit=12:00:00 submit_time=2024-10-07T21:07:23 start_time=2024-10-07T21:11:23 end_time=2024-10-08T00:00:19 runtime=02:48:56 margin=09:11:04 queuetime=00:04:00
+2024-10-08 00:00:23 jobid=50661695 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r370 procs=1 partition=core qos=normal jobname=P8913_289.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r370 timelimit=12:00:00 submit_time=2024-10-07T21:07:35 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:23 runtime=02:44:34 margin=09:15:26 queuetime=00:08:14
+2024-10-08 00:00:27 jobid=50661466 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r438 procs=1 partition=core qos=normal jobname=P8913_277.chr7 maxmemory_in_GiB=2.1 maxmemory_node=r438 timelimit=12:00:00 submit_time=2024-10-07T21:07:29 start_time=2024-10-07T21:11:25 end_time=2024-10-08T00:00:27 runtime=02:49:02 margin=09:10:58 queuetime=00:03:56
+2024-10-08 00:00:39 jobid=50661663 jobstate=COMPLETED username=mrendon account=naiss2023-5-478 nodes=r360 procs=1 partition=core qos=normal jobname=P8913_287.chr13 maxmemory_in_GiB=2.1 maxmemory_node=r360 timelimit=12:00:00 submit_time=2024-10-07T21:07:34 start_time=2024-10-07T21:15:49 end_time=2024-10-08T00:00:39 runtime=02:44:50 margin=09:15:10 queuetime=00:08:15
+
+

This output took around 1 second to produce.

+
+

How do I find jobs that have finished and took longer than a day?

+
finishedjobinfo | grep "runtime.[0-9]-"
+
+

Press CTRL-C to stop the process: letting it finish would take a very long time.

+
What does that look like?

Your output will look similar to this:

+
[sven@rackham1 ~]$ finishedjobinfo | grep "runtime.[0-9]-"
+2024-10-08 00:01:18 jobid=50597318 jobstate=COMPLETED username=nikolay account=naiss2024-22-35 nodes=r356 procs=20 partition=node qos=normal jobname=168011 maxmemory_in_GiB=5.3 maxmemory_node=r356 timelimit=10-00:00:00 submit_time=2024-10-02T10:36:59 start_time=2024-10-06T21:05:31 end_time=2024-10-08T00:01:18 runtime=1-02:55:47 margin=8-21:04:13 queuetime=4-10:28:32
+2024-10-08 00:21:55 jobid=50597286 jobstate=COMPLETED username=nikolay account=naiss2024-22-35 nodes=r432 procs=20 partition=node qos=normal jobname=1578718 maxmemory_in_GiB=5.3 maxmemory_node=r432 timelimit=10-00:00:00 submit_time=2024-10-02T10:36:10 start_time=2024-10-06T14:32:36 end_time=2024-10-08T00:21:55 runtime=1-09:49:19 margin=8-14:10:41 queuetime=4-03:56:26
+
+

This output took 30 seconds to produce, as there were few jobs at that time that took longer than a day to finish.

+
\ No newline at end of file
diff --git a/software/games_us/index.html b/software/games_us/index.html
new file mode 100644
index 000000000..ff895f629
--- /dev/null
+++ b/software/games_us/index.html
@@ -0,0 +1,3276 @@

GAMESS-US user guide

+

GAMESS-US version 20170930 is installed on Rackham. Newer versions can be installed on request to UPPMAX support. Snowy currently lacks GAMESS-US.

+

Running GAMESS

+

Load the module using

+
module load gamess/20170930
+
+

Below is an example submit script for Rackham, running on 40 cores (2 nodes with 20 cores each). It is essential to specify the project name:

+
#!/bin/bash -l
+#SBATCH -J jobname
+#SBATCH -p node -n 40
+#SBATCH -A PROJECT
+#SBATCH -t 03:00:00
+
+module load gamess/20170930
+
+rungms gms >gms.out
+
+

Memory specification

+

GAMESS uses two kinds of memory: replicated memory and distributed memory. Both kinds of memory should be given in the $SYSTEM specification. Replicated memory is specified using the MWORDS keyword and distributed memory with the MEMDDI keyword. It is very important that you understand the uses of these keywords. Check the GAMESS documentation for further information.

+

If your job requires 16 MW (mega-words) of replicated memory and 800 MW of distributed memory, as in the example below, the memory requirement per CPU core varies as 16+800/N, where N is the number of cores. Each word is 8 bytes of memory, which is why the amount of memory per core is (16+800/N)*8 MB. The amount of memory per node depends on the number of cores per node. Rackham has 20 cores per node; most nodes have 128 GB of memory, but 30 nodes have 512 GB and 4 nodes have 1 TB.
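
A worked example of the formula above (a sketch, assuming the 40-core job from the submit script earlier on this page):

# (16 + 800/40) MW per core = 36 MW; at 8 bytes per word that is 288 MB per core
echo $(( (16 + 800/40) * 8 ))   # prints 288 (MB per core)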

+

Communication

+

For intra-node communication, shared memory is used. For inter-node communication, MPI is used over the InfiniBand interconnect.

+

Citing GAMESS papers

+

It is essential that you read the GAMESS manual thoroughly to properly reference the papers specified in the instructions. All publications using GAMESS should cite at least the following paper:

+
@Article{GAMESS,
+author={M.W.Schmidt and K.K.Baldridge and J.A.Boatz and S.T.Elbert and
+M.S.Gordon and J.H.Jensen and S.Koseki and N.Matsunaga and
+K.A.Nguyen and S.Su and T.L.Windus and M.Dupuis and J.A.Montgomery},
+journal={J.~Comput.~Chem.},
+volume=14,
+pages={1347},
+year=1993,
+comment={The GAMESS program}}
+
+

If you need to obtain GAMESS yourself, please visit the GAMESS website for further instructions.

\ No newline at end of file
diff --git a/software/gaussian/index.html b/software/gaussian/index.html
new file mode 100644
index 000000000..26c7ee120
--- /dev/null
+++ b/software/gaussian/index.html
@@ -0,0 +1,3436 @@

Gaussian 09 user guide

+
+

A short guide on how to run g09 on UPPMAX.

+
+

Access to Gaussian 09

+

Gaussian 09 is available at UPPMAX. Uppsala University has a university license for all employees. If you want to be able to run g09, email support@uppmax.uu.se and ask to be added to the g09 group.

+

Running g09

+

In order to run g09 you must first set up the correct environment. You load the module with:

+
module load gaussian/g09.d01
+
+

Running single core jobs in Slurm

+

Here is an example of a submit script for Slurm:

+
#!/bin/bash -l
+#SBATCH -J g09test
+#SBATCH -p core
+#SBATCH -n 1
+#If you ask for a single core in Slurm on Rackham you get 6.4 GB of memory
+#SBATCH -t 1:00:00
+#SBATCH -A your_project_name
+
+module load gaussian/g09.d01
+g09 mp2.inp mp2.out
+
+

If you run a single core job on Rackham you can't use more than 6.4GB of memory.

+

When specifying the memory requirements, make sure that you ask for some more memory in the submit-script than in g09 to allow for some memory overhead for the program. As a general rule you should ask for 200MB more than you need in the calculation.

+

The mp2.inp input file in the example above:

+
%Mem=800MB
+#P MP2 aug-cc-pVTZ OPT
+
+test
+
+0 1
+Li
+F 1 1.0
+
+

Scratch space

+

The g09 module sets the environment variable GAUSS_SCRDIR to /scratch/$SLURM_JOBID in Slurm. These directories are removed after the job has finished.

+

If you want to set GAUSS_SCRDIR yourself, you must do it after module load gaussian/g09.d01 in your script.

+

If you set GAUSS_SCRDIR to something else in your submit script remember to remove all unwanted files after your job has finished.

+

If you think you will use a large amount of scratch space, you might want to set maxdisk in your input file. You can either set maxdisk directly on the command line in your input file:

+
#P MP2 aug-cc-pVTZ SCF=Tight maxdisk=170GB
+
+

or you can put something like:

+
MAXDISK=$( df | awk '/scratch/ { print $4 }' )KB
+sed -i '/^#/ s/ maxdisk=[[:digit:]]*KB//' inputfile
+sed -i '/^#/ s/$/ maxdisk='$MAXDISK'/' inputfile
+
+

in your scriptfile. This will set maxdisk to the currently available size of the /scratch disk on the node you will run on. Read more on maxdisk in the online manual.
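
As an illustration (assuming a route line of #P MP2 aug-cc-pVTZ SCF=Tight and, as a made-up number, 123456789 KB free on /scratch), the two sed commands first strip any old maxdisk setting and then append the current value, leaving the route line as:

#P MP2 aug-cc-pVTZ SCF=Tight maxdisk=123456789KB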

+

Running g09 in parallel

+

Gaussian can be run in parallel on a single node using shared memory. This is the input file for the slurm example below:

+

The dimer4.inp input:

+
%Mem=3800MB
+%NProcShared=4
+#P MP2 aug-cc-pVTZ SCF=Tight
+
+methanol dimer MP2
+
+0 1
+6 0.754746 -0.733607 -0.191063
+1 -0.033607 -1.456810 -0.395634
+1 1.007890 -0.778160 0.867678
+1 1.635910 -0.998198 -0.774627
+8 0.317192 0.576306 -0.534002
+1 1.033100 1.188210 -0.342355
+6 1.513038 3.469264 0.971885
+1 1.118398 2.910304 1.819367
+1 0.680743 3.818664 0.361783
+1 2.062618 4.333044 1.344537
+8 2.372298 2.640544 0.197416
+1 2.702458 3.161614 -0.539550
+
+

Running g09 in parallel in slurm

+

This can be done by asking for CPUs on the same node using the parallel node environments and telling Gaussian to use several CPUs using the NProcShared link 0 command.

+

An example submit-script:

+
#!/bin/bash -l
+#SBATCH -J g09_4
+#SBATCH -p node -n 8
+#SBATCH -t 1:00:00
+#SBATCH -A your_project_name
+
+module load gaussian/g09.d01
+export OMP_NUM_THREADS=1
+ulimit -s $STACKLIMIT
+
+g09 dimer4.inp dimer4.out
+
+

Notice that 8 cores are requested from the queue-system using the line #SBATCH -p node -n 8 and that Gaussian is told to use 4 cores with the link 0 command %NProcShared=4. The example above runs about 1.7 times as fast on eight cores as on four; to use all eight, just change %NProcShared=4 to %NProcShared=8 in the input file. Please benchmark your own inputs, as the speedup depends heavily on the method and the size of the system. In some cases Gaussian cannot use all the CPUs you ask for. This is indicated in the output with lines looking like this:

+

PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.

+

The reason for specifying OMP_NUM_THREADS=1 is to avoid using the OpenMP parts of the Gaussian code, and to use Gaussian's own threads instead.

+

Running g09 in parallel with linda

+

In order to run g09 in parallel over several nodes we have acquired Linda TCP.

+

Running g09 in parallel with linda in slurm

+

This can be done by asking for CPUs on several nodes using the parallel node environment and telling Gaussian to use several CPUs using the NProcLinda and NProcShared link 0 commands.

+

An example submit-script:

+
#!/bin/bash -l
+#SBATCH -J g09-linda
+#
+#SBATCH -t 2:00:0
+#
+#SBATCH -p node -n 40
+#SBATCH -A your_project_name
+
+module load gaussian/g09.d01
+ulimit -s $STACKLIMIT
+export OMP_NUM_THREADS=1
+
+#Next lines are there for linda to know what nodes to run on
+srun hostname -s | sort -u > tsnet.nodes.$SLURM_JOBID
+export GAUSS_LFLAGS='-nodefile tsnet.nodes.$SLURM_JOBID -opt "Tsnet.Node.lindarsharg: ssh"'
+
+#export GAUSS_SCRDIR=
+time g09 dimer20-2.inp dimer20-2.out
+
+rm tsnet.nodes.$SLURM_JOBID
+
+

Here is the input file:

+
%NProcLinda=2
+%NProcShared=20
+%Mem=2800MB
+#P MP2 aug-cc-pVTZ SCF=Tight
+
+methanol dimer MP2
+
+0 1
+6 0.754746 -0.733607 -0.191063
+1 -0.033607 -1.456810 -0.395634
+1 1.007890 -0.778160 0.867678
+1 1.635910 -0.998198 -0.774627
+8 0.317192 0.576306 -0.534002
+1 1.033100 1.188210 -0.342355
+6 1.513038 3.469264 0.971885
+1 1.118398 2.910304 1.819367
+1 0.680743 3.818664 0.361783
+1 2.062618 4.333044 1.344537
+8 2.372298 2.640544 0.197416
+1 2.702458 3.161614 -0.539550
+
+

Notice that 40 cores are requested from the queue-system using the line #SBATCH -p node -n 40 and that g09 is told to use 2 nodes via Linda with the %NProcLinda=2 link 0 command and 20 cores on each node with the link 0 command %NProcShared=20.

+

Please benchmark your own inputs as the speedup depends heavily on the method and size of system.

+

In some cases Gaussian cannot use all the cpus you ask for. This is indicated in the output with lines looking like this:

+
PrsmSu: requested number of processors reduced to: 1 ShMem 1 Linda.
+
+

Number of CPUs on the shared memory nodes

+

Use the information below as a guide to how many CPUs to request for your calculation:

+

On Rackham

+
  • 272 nodes with two 10-core CPUs and 128GB memory
  • 32 nodes with two 10-core CPUs and 256GB memory
+

On Milou

+
  • 174 nodes with two 8-core CPUs and 128GB memory
  • 17 nodes with two 8-core CPUs and 256GB memory
  • 17 nodes with two 8-core CPUs and 512GB memory
+

Note on chk-files

+

You may experience difficulties if you mix different versions (g09 and g03) or revisions of Gaussian. If you use a checkpoint file (.chk file) from an older revision (say g03 e.01) in a new calculation with revision a.02, g09 may not run properly.

+

We recommend using the same revision if you want to restart a calculation or reuse an older checkpoint file.

\ No newline at end of file
diff --git a/software/gcc/index.html b/software/gcc/index.html
new file mode 100644
index 000000000..5324c7a23
--- /dev/null
+++ b/software/gcc/index.html
@@ -0,0 +1,3190 @@

GCC/gcc

+

GCC is shorthand for 'GNU Compiler Collection', a collection of compilers, where gcc is the name of the actual program.

+

gcc is part of the gcc module.

+
How do I find the gcc module?

Like you'd find any module:

+
module spider gcc
+
+
+
Which versions does the gcc module have?

Like you'd find the version of any module:

+
module spider gcc
+
+

This will look similar to this:

+
[sven@rackham2 ~]$ module spider gcc
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  gcc:
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+     Versions:
+        gcc/4.2.3
+        gcc/4.3.0
+        gcc/4.4
+        gcc/4.8.2
+        gcc/4.8.3
+        gcc/4.9.2
+        gcc/4.9.4
+        gcc/5.2.0
+        gcc/5.3.0
+        gcc/5.4.0
+        gcc/5.5.0
+        gcc/6.1.0
+        gcc/6.2.0
+        gcc/6.3.0
+        gcc/6.4.0
+        gcc/7.1.0
+        gcc/7.2.0
+        gcc/7.3.0
+        gcc/7.4.0
+        gcc/8.1.0
+        gcc/8.2.0
+        gcc/8.3.0
+        gcc/8.4.0
+        gcc/9.1.0
+        gcc/9.2.0
+        gcc/9.3.0
+        gcc/10.1.0
+        gcc/10.2.0
+        gcc/10.3.0
+        gcc/11.2.0
+        gcc/11.3.0
+        gcc/12.1.0
+        gcc/12.2.0
+        gcc/12.3.0
+        gcc/13.1.0
+        gcc/13.2.0
+        gcc/13.3.0
+        gcc/14.1.0
+     Other possible modules matches:
+        GCC  GCCcore  gcccuda
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  To find other possible module matches execute:
+
+      $ module -r spider '.*gcc.*'
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  For detailed information about a specific "gcc" package (including how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modules.
+  For example:
+
+     $ module spider gcc/14.1.0
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+
+
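
To use one of the versions listed above, load it and verify which compiler you got, for example:

module load gcc/13.2.0
gcc --version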

The GCC can be used to:

  • compile C code
  • compile C++ code
  • compile Fortran code

Working together with GCC are:

  • the gdb debugger
  • the gprof profiler

\ No newline at end of file
diff --git a/software/gcc_compile_c/index.html b/software/gcc_compile_c/index.html
new file mode 100644
index 000000000..c60704d7f
--- /dev/null
+++ b/software/gcc_compile_c/index.html
@@ -0,0 +1,3189 @@

Compile C using GCC

+

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

+

This page describes how to compile C code using the GCC.

+

Procedure

+

0. Create a C source file

+

You will need C code to work on.

+

In this optional step, a file with a minimal C program is created.

+

Create and write a C source file called hello_world.c:

+
nano hello_world.c
+
+

In nano, write the C program as such:

+
#include <stdio.h>
+
+int main() {
+  printf("hello, world\n");
+}
+
+

1. Load a GCC module

+

Load a recent GCC module:

+
module load gcc/13.2.0
+
+
Do I really need to load a module?

No, as there is a system-installed GCC.

+

For the sake of reproducible research, always load a module of a specific version.

+
+

If you need the C11 or C17 standards, use these module versions or newer:

| Module version | Description                    |
| -------------- | ------------------------------ |
| gcc/4.8        | Fully implemented C11 standard |
| gcc/8          | Fully implemented C17 standard |
+

2. Compile the source file

+

After saving and closing nano, compile as such:

+
gcc hello_world.c
+
+

This compiles the file hello_world.c using all defaults:

+
  • default/no optimization
  • the executable created is called a.out
+

To compile the file hello_world.c with run-time speed optimization, creating an executable with a more sensible name, use:

+
gcc -O3 -o hello_world hello_world.c
+
+
  • -O3: optimize for run-time speed
  • -o hello_world: the executable created is called hello_world
+
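
If you compiled with -o hello_world as above, run the named executable instead of the default a.out:

./hello_world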

3. Run

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/gcc_compile_cpp/index.html b/software/gcc_compile_cpp/index.html
new file mode 100644
index 000000000..e4e217905
--- /dev/null
+++ b/software/gcc_compile_cpp/index.html
@@ -0,0 +1,3171 @@

Compile C++ using GCC

+

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

+

This page describes how to compile C++ code using the GCC.

+

Procedure

+

0. Create a C++ source file

+

You will need C++ code to work on.

+

In this optional step, a file with a minimal C++ program is created.

+

Create and write a C++ source file called hello_world.cpp:

+
nano hello_world.cpp
+
+

In nano, write the C++ program as such:

+
#include <iostream>
+
+int main()
+{
+  std::cout << "hello, world\n";
+}
+
+

1. Load a GCC module

+

Load a recent GCC module:

+
module load gcc/13.2.0
+
+
Do I really need to load a module?

No, as there is a system-installed GCC.

+

For the sake of reproducible research, always load a module of a specific version.

+
+

2. Compile the source file

+

After saving and closing nano, compile as such:

+
g++ hello_world.cpp
+
+

This compiles the file hello_world.cpp using all defaults:

+
  • default/no optimization
  • the executable created is called a.out
+

To compile the file hello_world.cpp with run-time speed optimization, creating an executable with a more sensible name, use:

+
g++ -O3 -o hello_world hello_world.cpp
+
+
  • -O3: optimize for run-time speed
  • -o hello_world: the executable created is called hello_world
+
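
If you need a specific C++ standard, g++ also accepts the -std flag (shown here with C++17 as an example; any standard supported by the loaded GCC version works the same way):

g++ -std=c++17 -O3 -o hello_world hello_world.cpp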

3. Run

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/gcc_compile_fortran/index.html b/software/gcc_compile_fortran/index.html
new file mode 100644
index 000000000..aceff04ab
--- /dev/null
+++ b/software/gcc_compile_fortran/index.html
@@ -0,0 +1,3169 @@

Compile Fortran using GCC

+

GCC (shorthand for 'GNU Compiler Collection') is a collection of compilers able to compile multiple different programming languages.

+

This page describes how to compile Fortran code using the GCC.

+

Procedure

+

0. Create a Fortran source file

+

You will need Fortran code to work on.

+

In this optional step, a file with a minimal Fortran program is created.

+

Create and write a Fortran source file called hello_world.f:

+
nano hello_world.f
+
+

In nano, write the Fortran program as such:

+
C     HELLO.F :  PRINT MESSAGE ON SCREEN
+      PROGRAM HELLO
+      WRITE(*,*) "hello, world";
+      END
+
+

1. Load a GCC module

+

Load a recent GCC module:

+
module load gcc/13.2.0
+
+
Do I really need to load a module?

No, as there is a system-installed GCC.

+

For the sake of reproducible research, always load a module of a specific version.

+
+

2. Compile the source file

+

After saving and closing nano, compile as such:

+
gfortran hello_world.f
+
+

This compiles the file hello_world.f using all defaults:

+
  • default/no optimization
  • the executable created is called a.out
+

To compile the file hello_world.f with run-time speed optimization, creating an executable with a more sensible name, use:

+
gfortran -Ofast -o hello_world hello_world.f
+
+
  • -Ofast: optimize for run-time speed, similar to -O3
  • -o hello_world: the executable created is called hello_world
+

3. Run

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
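
The .f suffix makes gfortran treat the source as fixed-form Fortran, which is why the example above follows the old column layout. Modern free-form code uses the .f90 suffix instead; a minimal sketch:

cat > hello_world.f90 <<'EOF'
program hello
  print *, "hello, world"
end program hello
EOF
gfortran -Ofast -o hello_world_f90 hello_world.f90
./hello_world_f90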
\ No newline at end of file
diff --git a/software/gdb/index.html b/software/gdb/index.html
new file mode 100644
index 000000000..0d46843d0
--- /dev/null
+++ b/software/gdb/index.html
@@ -0,0 +1,3162 @@

gdb

+

There are many debuggers. This page describes gdb, the GNU debugger.

+

gdb is a debugger provided with the GNU compilers. It works fine for C, C++ and Fortran. With older versions there were problems with Fortran 90/95.

+ +

Debugging GCC-compiled programs

+

In order to use gdb, do the following. Load a recent gcc module and a gdb module (the system gdb is from 2013!):

+
module load gcc/10.3.0 gdb/11.2
+
+

Compile your program with flags for debugging added, e.g. -ggdb:

+
gcc -ggdb your-program.c -o your-program
+
+

Run the gdb program:

+
gdb your-program
+
+

Then you can use the gdb commands, like run, break, step, help, ...
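
A minimal sketch of a session (the breakpoint location is just an example; commands are typed at the (gdb) prompt):

gdb your-program
(gdb) break main      # stop when main() is entered
(gdb) run             # start the program
(gdb) step            # execute one source line
(gdb) quit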

+

Exit with Ctrl+D.

+

Debugging Intel-compiled programs

+

In order to use gdb with Intel-compiled programs, do the following:

+

Load the icc module

+
module load intel/20.4
+
+

Compile your program with flags for debugging added, e.g. -g

+
icc -g your-program.c -o your-program
+
+

Run the gdb program:

+
gdb your-program
+
+

Then you can use the gdb commands, like run, break, step, help, ...

+

Exit with Ctrl+D.

\ No newline at end of file
diff --git a/software/gedit/index.html b/software/gedit/index.html
new file mode 100644
index 000000000..7f791fe15
--- /dev/null
+++ b/software/gedit/index.html
@@ -0,0 +1,3106 @@

gedit

+

There are many text editors installed on the UPPMAX systems. gedit is one of these.

+

gedit has a graphical user interface and is included within MobaXterm.

\ No newline at end of file
diff --git a/software/git_on_bianca/index.html b/software/git_on_bianca/index.html
new file mode 100644
index 000000000..51389ef8d
--- /dev/null
+++ b/software/git_on_bianca/index.html
@@ -0,0 +1,3270 @@

Git on Bianca

+

NOTE: This guide assumes you know basic git commands and will not cover how to use git as a tool.

+
  • One of the security features of Bianca is that there is no internet access from the cluster.
  • This makes it a bit more complicated to use things like Git to collaborate on files.
  • In this guide we will cover two use-cases:
    1. collaborate with other users within the same Bianca project, and
    2. collaborate with other users using GitHub.

Within the same Bianca project

+

Usually an external service like GitHub is used to host a remote repository (repo) that everyone pushes and pulls from. Since we don’t have an internet connection on Bianca we have to push and pull from a location within your Bianca project. Luckily that is simple to setup with git.

+

To create your own remote repo that everyone will push and pull from, create an empty directory somewhere in your project folder, go into it and initialize the repo.

+
# go to project dir
+cd /proj/nobackup/sens2023999/
+
+# create dir
+mkdir my_repo.git
+
+# go into dir
+cd my_repo.git
+
+# init repo
+git init --bare --share=group
+
+

The name of the created directory doesn’t have to end with .git but it is good for us humans to indicate that this is a repo people will use to push and pull from, and not where you will manually edit files.

+

To start using this repo you will clone it just like you would clone a GitHub repo.

+
# go to where you want to clone the repo, e.g. your home
+cd ~/
+
+# clone it
+git clone /proj/nobackup/sens2023999/my_repo.git
+
+# add a file and make the first commit
+echo "# my_repo" >> README.md
+git add README.md
+git commit -m "first commit"
+git branch -M main
+git push -u origin main
+
+

Now you will have a new directory named my_repo that only has a README.md file, and you can start creating other files in there. From this point onwards git will work the same way as if you were using a GitHub hosted repo to collaborate. Once you have pushed your files the others in your project can clone the repo and start pushing and pulling their changes.

+

Using GitHub (or any other git hosting service)

+

These instructions will work with any git hosting provider, like GitLab or Bitbucket, but we’ll use GitHub in the examples.

+

In the examples we use Rackham to mount the wharf directory. This is not the only way to do it. If you’d rather use a sftp client to transfer your files from the outside of Bianca to and from the wharf it will work just as well.

+

Cloning and pulling only

+

If you only want to run someone else's software that they have stored in a GitHub repo, you only need to clone the repo to be able to use it. Since you are only a user of the software there is no need to be able to push to the repo. If there are any updates to the repo you only need to pull the repo to get them.

+

The way to do this on Bianca is to simply clone the repo on a computer with internet access, move it to the Bianca wharf, and then copy it to its final destination on Bianca. If there are any updates to the repo you want to get you move the repo back to the wharf, pull the updates to the mounted wharf directory on Rackham, then move the directory back to its final destination on Bianca.

+
### on rackham ###
+
+# set variables for readability
+PROJ=sens2023999
+UNAME=youruppmaxusername
+
+# mount the wharf directory
+mkdir -p ~/wharf_mnt
+/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt
+
+# clone the repo to the wharf directory
+cd ~/wharf_mnt
+git clone git@github.com:example/example.git
+
+### on Bianca ###
+
+# move the directory to its final destination on Bianca
+
+mv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/example/ /proj/$PROJ/
+
+

If there are any updates to the software you might want to pull the changes from GitHub.

+
### on bianca ###
+
+# move the directory you cloned from GitHub back to the wharf
+mv /proj/$PROJ/example/ /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/
+
+### on rackham ###
+
+# mount the wharf directory
+mkdir -p ~/wharf_mnt
+/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt
+
+# pull the updates
+cd ~/wharf_mnt/example
+git pull
+
+### on bianca ###
+
+# move the directory to its final destination on Bianca
+mv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/example/ /proj/$PROJ/
+
+

Pushing and pulling

+

If you are a collaborator on a piece of software, you will need to both pull from and push to the repo.

+

The general approach to using git as a collaborator with GitHub on Bianca is:

+
  1. On Bianca: make a backup of your code directory.
  2. On Bianca: move the entire code directory to the wharf folder.
  3. On Rackham: mount the wharf directory.
  4. On Rackham: change the git remote URL to GitHub’s URL.
  5. On Rackham: pull and push from GitHub.
  6. On Bianca: move the directory from the wharf back to your project.
  7. On Bianca: change the git remote URL back to your local Bianca repo.
  8. On Bianca: push any changes you got from GitHub to your local Bianca repo.

Best way to show this is by an example:

+
### on bianca ###
+
+# set variables for readability
+PROJ=sens2023999
+UNAME=youruppmaxusername
+
+# make a copy of your code dir, delete this later if all goes well :)
+cp -ar /proj/$PROJ/code_dir /proj/$PROJ/code_dir.$(date +%Y-%m-%d)
+
+# move the directory with your code to the wharf
+mv /proj/$PROJ/code_dir/ /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/
+
+### on rackham ###
+
+# set variables for readability
+PROJ=sens2023999
+UNAME=youruppmaxusername
+
+# mount the wharf folder
+mkdir -p ~/wharf_mnt
+/proj/staff/dahlo/bin/sshfs $UNAME-$PROJ@bianca-sftp.uppmax.uu.se:$UNAME-$PROJ ~/wharf_mnt
+
+# update the remote repo's URL to your GitHub URL
+cd ~/wharf_mnt/code_dir
+git remote set-url origin git@github.com:example/example.git
+git pull
+git push
+
+### on bianca ###
+
+# move the directory back from the wharf
+mv /proj/$PROJ/nobackup/wharf/$UNAME/$UNAME-$PROJ/code_dir/ /proj/$PROJ/
+cd /proj/$PROJ/code_dir
+
+# change the remote repo's URL back to your local repo on Bianca
+git remote set-url origin /path/to/local/repo
+
+# push any changes you got from GitHub to your local repo
+git push
+
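
When switching between the GitHub URL and the local Bianca repo, it is easy to lose track of where a clone currently points; you can always check with:

git remote -v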
\ No newline at end of file
diff --git a/software/globus/index.html b/software/globus/index.html
new file mode 100644
index 000000000..72bdf95ac
--- /dev/null
+++ b/software/globus/index.html
@@ -0,0 +1,3131 @@

globus

+

Globus homepage

+

Globus is a service to easily and safely transfer data.

+

Globus after first login

+

However, Uppsala University does not have a subscription.

+
Does UU have a subscription now?

Please contribute by letting us know. Thanks!

+
\ No newline at end of file
diff --git a/software/gprof/index.html b/software/gprof/index.html
new file mode 100644
index 000000000..eab689663
--- /dev/null
+++ b/software/gprof/index.html
@@ -0,0 +1,3120 @@

gprof

+

There are multiple profilers available on UPPMAX. This page describes gprof.

+

gprof is the GNU profiler, provided with the GNU compiler package.

+

In order to use gprof do the following:

+

Load a recent gcc module and a recent binutils module:

+
module load gcc
+module load binutils
+
+

Compile your program with the -pg -g flags added:

+
gcc -O0 -pg -g your-program.c -o your-program
+
+

Run it:

+
./your-program
+
+

Then generate the profile report from the gmon.out file that the run produced:

+
gprof your-program gmon.out > output-file
+
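
The report written to output-file starts with a flat profile (time spent per function) followed by a call graph; have a look with, for example:

less output-file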
\ No newline at end of file
diff --git a/software/gromacs/index.html b/software/gromacs/index.html
new file mode 100644
index 000000000..0a59570cb
--- /dev/null
+++ b/software/gromacs/index.html
@@ -0,0 +1,3358 @@

Running Gromacs at UPPMAX

+

This page describes how to run the GROMACS molecular dynamics software on UPPMAX systems. See the gromacs web page for more information.

+

Have a look at this page as well: best practices for running GROMACS on HPC.

+

Selected setups for benchmarking on HPC2N serve as examples.

+

Loading the GROMACS module

+
module load gromacs/2021.1.th
+
+

SBATCH script

+
+

adapted from HPC2N

+
+
#!/bin/bash -l
+#SBATCH -A SNIC_project
+#SBATCH -t 00:15:00
+#SBATCH -p node -n 10
+# Use 2 threads per task
+#SBATCH -c 2
+
+module load gromacs/2021.1.th
+
+# Automatic selection of single or multi node based GROMACS
+if [ $SLURM_JOB_NUM_NODES -gt 1 ]; then
+  GMX="gmx_mpi"
+  MPIRUN="mpirun"
+  ntmpi=""
+else
+  GMX="gmx"
+  MPIRUN=""
+  ntmpi="-ntmpi $SLURM_NTASKS"
+fi
+
+# Automatic selection of ntomp argument based on "-c" argument to sbatch
+if [ -n "$SLURM_CPUS_PER_TASK" ]; then
+  ntomp="$SLURM_CPUS_PER_TASK"
+else
+  ntomp="1"
+fi
+# Make sure to set OMP_NUM_THREADS equal to the value used for ntomp
+# to avoid complaints from GROMACS
+export OMP_NUM_THREADS=$ntomp
+$MPIRUN $GMX mdrun $ntmpi -ntomp $ntomp -s MEM.tpr -nsteps 10000 -resethway
+
+

How important it is to select appropriate options

+

Here is a simple benchmark run on a single interactive node with 20 CPUs, using the MEM example from this benchmark:

+
module load gromacs/2021.1.th
+mpirun -np XX gmx_mpi mdrun -ntomp YY -s MEM.tpr -nsteps 10000 -resethway
+
+

where XX * YY = 20
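
For example, to use 10 MPI ranks with 2 OpenMP threads each (the best-performing combination in the results below):

mpirun -np 10 gmx_mpi mdrun -ntomp 2 -s MEM.tpr -nsteps 10000 -resethway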

+
 1  $ grep "gmx_mpi\|MPI ranks\|Performance" *
 2
 3  #md.log.1#:  gmx_mpi mdrun -ntomp 1 -s MEM.tpr -nsteps 10000 -resethway
 4  #md.log.1#:On 12 MPI ranks doing PP, and
 5  #md.log.1#:on 8 MPI ranks doing PME
 6  #md.log.1#:Performance:       20.520        1.170
 7
 8  #md.log.2#:  gmx_mpi mdrun -ntomp 2 -s MEM.tpr -nsteps 10000 -resethway
 9  #md.log.2#:On 10 MPI ranks, each using 2 OpenMP threads
10  #md.log.2#:Performance:       25.037        0.959
11
12  #md.log.3#:  gmx_mpi mdrun -ntomp 4 -s MEM.tpr -nsteps 10000 -resethway
13  #md.log.3#:On 5 MPI ranks, each using 4 OpenMP threads
14  #md.log.3#:Performance:        5.388        4.454
15
16  #md.log.4#:  gmx_mpi mdrun -ntomp 5 -s MEM.tpr -nsteps 10000 -resethway
17  #md.log.4#:On 4 MPI ranks, each using 5 OpenMP threads
18  #md.log.4#:Performance:       24.090        0.996
19
20  #md.log.5#:  gmx_mpi mdrun -ntomp 10 -s MEM.tpr -nsteps 10000 -resethway
21  #md.log.5#:NOTE: Your choice of number of MPI ranks and amount of resources results in using 10 OpenMP threads per rank, which is most likely inefficient. The optimum is usually between 1 and 6 threads per rank.
22  #md.log.5#:On 2 MPI ranks, each using 10 OpenMP threads
23  #md.log.5#:Performance:        3.649        6.577
24
25  md.log:  gmx_mpi mdrun -ntomp 20 -s MEM.tpr -nsteps 10000 -resethway
26  md.log:Performance:        2.012       11.931
+
+

Notice how badly the last run performs:

+

$ mpirun -np 1 gmx_mpi mdrun -ntomp 20 -s MEM.tpr -nsteps 10000 -resethway (lines 25-26)

+

According to this short test, this particular setup runs best on a single Rackham node with

+

$ mpirun -np 10 gmx_mpi mdrun -ntomp 2 -s MEM.tpr -nsteps 10000 -resethway (lines 8-10)

+

Running older versions of GROMACS

+

Versions 4.5.1 to 5.0.4

+

The gromacs tools have been compiled serially. The mdrun program has also been compiled in parallel using MPI. The name of the parallel binary is mdrun_mpi.

+

Run the parallelized program using:

+
mpirun -np XXX mdrun_mpi
+
+

... where XXX is the number of cores to run the program on.

+

Version 5.1.1

+

The binary is gmx_mpi and (e.g.) the mdrun command is issued like this:

+
mpirun -np XXX gmx_mpi mdrun
+
\ No newline at end of file
diff --git a/software/icc/index.html b/software/icc/index.html
new file mode 100644
index 000000000..6fb96d576
--- /dev/null
+++ b/software/icc/index.html
@@ -0,0 +1,3115 @@
\ No newline at end of file
diff --git a/software/icc_compile_c/index.html b/software/icc_compile_c/index.html
new file mode 100644
index 000000000..2a173a618
--- /dev/null
+++ b/software/icc_compile_c/index.html
@@ -0,0 +1,3148 @@

Compile a C program using icc

+

icc is the Intel C compiler. This page describes how to compile C code using icc.

+

Procedure

+

1. Load an intel module

+

For versions of the Intel compiler up to and including 2020, load an intel module with a two-digit version, from 15 up to and including 20:

+
module load intel/20.4
+
+

The C11 and C17 (bug-fix) standards are supported from intel/17 onwards (fully from intel/19).

+

2. Write, compile, and run the C program

+

Create and write a C source file called hello_world.c:

+
nano hello_world.c
+
+

In nano, write the C program as such:

+
#include <stdio.h>
+
+int main() {
+  printf("hello, world\n");
+}
+
+

After saving and closing nano, compile as such:

+
icc hello_world.c
+
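
As with gcc, you can ask for optimization and a more sensible executable name (a sketch; icc accepts the same -O3 and -o flags):

icc -O3 -o hello_world hello_world.c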
+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/icpc/index.html b/software/icpc/index.html
new file mode 100644
index 000000000..8870c07c4
--- /dev/null
+++ b/software/icpc/index.html
@@ -0,0 +1,3115 @@
\ No newline at end of file
diff --git a/software/icpc_compile_cpp/index.html b/software/icpc_compile_cpp/index.html
new file mode 100644
index 000000000..d45fb9195
--- /dev/null
+++ b/software/icpc_compile_cpp/index.html
@@ -0,0 +1,3148 @@

Compile a C++ program using icpc

+

icpc is an Intel C++ compiler. This page describes how to compile C++ code using icpc.

+

Procedure

+

1. Load the modules

+

Load a recent intel module:

+
module load intel/20.4
+
+

2. Write the C++ program

+

Create and write a C++ source file called hello_world.cpp:

+
nano hello_world.cpp
+
+

In nano, write the C++ program as such:

+
#include <iostream>
+
+int main() 
+{
+  std::cout << "hello, world\n";
+}
+
+

3. Compile the C++ program

+

After saving and closing nano, compile as such:

+
icpc hello_world.cpp 
+
+

4. Run the executable

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/icx/index.html b/software/icx/index.html
new file mode 100644
index 000000000..6fceb88ec
--- /dev/null
+++ b/software/icx/index.html
@@ -0,0 +1,3107 @@
\ No newline at end of file
diff --git a/software/icx_compile_c/index.html b/software/icx_compile_c/index.html
new file mode 100644
index 000000000..1a955d61c
--- /dev/null
+++ b/software/icx_compile_c/index.html
@@ -0,0 +1,3148 @@

Compile a C program using icx

+

icx is an Intel C compiler. This page describes how to compile C code using icx.

+

Procedure

+

1. Load the modules

+

Load these modules:

+
module load intel-oneapi 
+module load compiler/2023.1.0
+
+

2. Write the C program

+

Create and write a C source file called hello_world.c:

+
nano hello_world.c
+
+

In nano, write the C program as such:

+
#include <stdio.h>
+
+int main() {
+  printf("hello, world\n");
+}
+
+

3. Compile the C program

+

After saving and closing nano, compile as such:

+
icx hello_world.c
+
+

4. Run the executable

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/idb/index.html b/software/idb/index.html
new file mode 100644
index 000000000..cbd82d261
--- /dev/null
+++ b/software/idb/index.html
@@ -0,0 +1,3107 @@

idb

+

There are many debuggers. This page describes idb, the Intel debugger.

+

idb was provided with the Intel compiler. It is now deprecated and you are advised to use gdb to debug programs compiled with the Intel compiler. See the gdb page for how to do so.

\ No newline at end of file
diff --git a/software/ides/index.html b/software/ides/index.html
new file mode 100644
index 000000000..72651450a
--- /dev/null
+++ b/software/ides/index.html
@@ -0,0 +1,3136 @@

IDE:s

+

RStudio on Bianca

+
+

RStudio is an IDE. Here, it is run on Bianca.

+
+

Introduction

+

IDE (pronounced 'aj-dee-ee') is short for 'Integrated Development Environment', or 'a program in which you do programming'. The goal of an IDE is to help develop code, with features such as code completion, code hints and interactive debugging.

+

There are many different IDEs, of which some are tailored to one programming language (e.g. RStudio) and some allow multiple programming languages.

+

How to use an IDE depends on the UPPMAX cluster you want to use:

+ +

In general, using an IDE is easiest on Rackham and hardest on Bianca.

\ No newline at end of file
diff --git a/software/ides_on_bianca/index.html b/software/ides_on_bianca/index.html
new file mode 100644
index 000000000..cd5fe90b5
--- /dev/null
+++ b/software/ides_on_bianca/index.html
@@ -0,0 +1,3158 @@

IDEs on Bianca

+

RStudio on Bianca

+
+

RStudio is one of the IDEs that can be used on Bianca.

+
+

Here we show how to use some IDEs on Bianca.

+
Forgot what an IDE is?

See the general page on IDEs here.

+
+
Do you really want to use an IDE on Bianca?

Using an IDE on Bianca is cumbersome and there are superior ways to develop code on Bianca.

+

However, using an IDE may make it easier for a new user to feel +comfortable using Bianca.

+

The UPPMAX 'Programming Formalisms' course will teach you a superior workflow, where development takes place on your own regular computer and testing is done using simulated/fake data. When development is done, the tested project is uploaded to Bianca and set up to use the real data instead.

+

This avoids using a clumsy remote desktop environment, and brings many added bonuses as well.

+
+

Here are step-by-step guides to start these IDEs on Bianca:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| IDE       | Languages       | Screenshot                 |
| --------- | --------------- | -------------------------- |
| Jupyter   | Python          | Jupyter on Bianca          |
| RStudio   | R               | RStudio on Bianca          |
| ⛔ VSCode | General-purpose | Impossible                 |
| VSCodium  | General-purpose | VSCodium running on Bianca |
\ No newline at end of file
diff --git a/software/ides_on_rackham/index.html b/software/ides_on_rackham/index.html
new file mode 100644
index 000000000..575ad5a47
--- /dev/null
+++ b/software/ides_on_rackham/index.html
@@ -0,0 +1,3157 @@

IDEs on Rackham

+

RStudio on Rackham

+
+

RStudio is one of the IDEs that can be used on Rackham.

+
+

Here we show how to use some IDEs on Rackham.

+
Forgot what an IDE is?

See the general page on IDEs here.

+
+
Do you really want to use an IDE on Rackham?

Using an IDE on Rackham is cumbersome and there are superior ways to develop code on Rackham.

+

However, using an IDE may make it easier for a new user to feel +comfortable using Rackham.

+

The UPPMAX 'Programming Formalisms' course will teach you a superior workflow, where development takes place on your own regular computer and testing is done using simulated/fake data. When development is done, the tested project is uploaded to Rackham and set up to use the real data instead.

+

This avoids using a clumsy remote desktop environment, and brings many added bonuses as well.

+
+

Here are step-by-step guides to start these IDEs on Rackham:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
| IDE         | Languages       |
| ----------- | --------------- |
| Jupyter     | Python          |
| RStudio     | R               |
| ⛔ VSCode   | General-purpose |
| ⛔ VSCodium | General-purpose |
+
+

IDEs on Rackham. IDEs marked with ⛔ cannot be run on Rackham.

+
\ No newline at end of file
diff --git a/software/ifort/index.html b/software/ifort/index.html
new file mode 100644
index 000000000..0e910a7e3
--- /dev/null
+++ b/software/ifort/index.html
@@ -0,0 +1,3107 @@
\ No newline at end of file
diff --git a/software/ifort_compile_fortran/index.html b/software/ifort_compile_fortran/index.html
new file mode 100644
index 000000000..e3821220b
--- /dev/null
+++ b/software/ifort_compile_fortran/index.html
@@ -0,0 +1,3146 @@

Compile a Fortran program using ifort

+

ifort is an Intel Fortran compiler. This page describes how to compile Fortran code using ifort.

+

Procedure

+

1. Load the modules

+

Load a recent intel module:

+
module load intel/20.4
+
+

2. Write the Fortran program

+

Create and write a Fortran source file called hello_world.f:

+
nano hello_world.f
+
+

In nano, write the Fortran program as such:

+
C     HELLO.F :  PRINT MESSAGE ON SCREEN
+      PROGRAM HELLO
+      WRITE(*,*) "hello, world";
+      END
+
+

3. Compile the Fortran program

+

After saving and closing nano, compile as such:

+
ifort hello_world.f
+
+

4. Run the executable

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/ifx/index.html b/software/ifx/index.html
new file mode 100644
index 000000000..79752534d
--- /dev/null
+++ b/software/ifx/index.html
@@ -0,0 +1,3107 @@
\ No newline at end of file
diff --git a/software/ifx_compile_fortran/index.html b/software/ifx_compile_fortran/index.html
new file mode 100644
index 000000000..a34617675
--- /dev/null
+++ b/software/ifx_compile_fortran/index.html
@@ -0,0 +1,3147 @@

Compile a Fortran program using ifx

+

ifx is an Intel Fortran compiler. This page describes how to compile Fortran code using ifx.

+

Procedure

+

1. Load the modules

+

Load these modules:

+
module load intel-oneapi 
+module load compiler/2023.1.0
+
+

2. Write the Fortran program

+

Create and write a Fortran source file called hello_world.f:

+
nano hello_world.f
+
+

In nano, write the Fortran program as such:

+
C     HELLO.F :  PRINT MESSAGE ON SCREEN
+      PROGRAM HELLO
+      WRITE(*,*) "hello, world";
+      END
+
+

3. Compile the Fortran program

+

After saving and closing nano, compile as such:

+
ifx hello_world.f
+
+

4. Run the executable

+

Run the program:

+
./a.out 
+
+

Output:

+
hello, world
+
\ No newline at end of file
diff --git a/software/igv/index.html b/software/igv/index.html
new file mode 100644
index 000000000..bf67177d3
--- /dev/null
+++ b/software/igv/index.html
@@ -0,0 +1,3274 @@

Starting Integrative Genomics Viewer (IGV) on Rackham/Snowy

+

This guide goes through, step by step, how to start the Integrative Genomics Viewer (IGV).

+

Step 1: Connect to UPPMAX with X-forwarding enabled. (Important step)

+

In a terminal, use SSH with X forwarding enabled:

+
ssh -X [user name]@rackham.uppmax.uu.se
+
+

For example:

+
ssh -X sven@rackham.uppmax.uu.se
+
+ +

Step 2: Reserve a node using "interactive"

+

Since genomic sequences require lots of memory, it is not suitable to run IGV on one of the login nodes: that would slow down the response times for all other users on the same login node.

+

Instead, reserve a node that you will have all by yourself. This command will reserve a whole node for 12 hours, the maximum amount of interactive time you can get and still receive a high priority for your job (feel free to change that if you want to).

+
interactive -A [UPPMAX project id] -p node -t 12:00:00
+
+

For example:

+
interactive -A snic2017-7-274 -p node -t 12:00:00
+
+

For an interactive session on Snowy, add the flag "-M snowy":

+
interactive -A snic2017-7-274 -M snowy -p node -t 12:00:00
+
+
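
The project ids above are placeholders. If you do not remember your own, the projinfo command (also mentioned on the jobinfo page below) shows your projects and their core hour usage:

+
projinfo
+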

Step 3: Load the IGV module

+

When your job has been started, type the following command to load the IGV module:

+
module load bioinfo-tools IGV
+
+

Step 4: Start IGV

+

To start IGV, type the following:

+
igv-node
+
+

That's it, now IGV should be loaded and ready to go. For more information about how to use IGV, please visit IGV's user guide.

[Binary screenshot files added under software/img/, from Add-On_explorer.PNG to xeyes_with_ssh_x_forwarding.png; referenced by the pages in this diff.]

diff --git a/software/install/index.html b/software/install/index.html
Installing - UPPMAX Documentation

Software and package installation

+

Install software yourself

+

Build from source

+ +

Example

+

This guide might not work on all programs. Read the installation instructions for your program!

+
    +
  • Download the program, with wget or by other means like git clone <https-URL to GitHub repo>.
  • +
  • If not cloning, unpack it with tar, gunzip or similar.
  • +
+
tar xvfz program.tgz
+
+

The steps below are more general:

+
    +
  • Read the installation instructions!
  • +
  • If the program is written in Fortran, C or C++, load a compiler. You will often have fewer problems with gcc, but intel often gives faster code.
  • +
+
module load gcc
+
+
    +
  • If applicable, do:
  • +
+
mkdir $HOME/glob/program_name
+./configure --prefix=$HOME/glob/program_name
+make
+make test
+make install
+
+
    +
  • Try to find a test on the home page of the program or in the installation instructions and try to run it.
  • +
+
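
If the program was installed into $HOME/glob/program_name as above, you will typically also want its executables on your PATH. A minimal sketch, assuming the program installs a bin/ subdirectory:

+
export PATH=$HOME/glob/program_name/bin:$PATH
+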

Packages and libraries to scripting programs

+
    +
  • Python, R and Julia all have some centrally installed packages that are available from the modules.
  • +
  • R has a special module called R_packages, and some Machine Learning python packages are included in the python_ML_packages module.
  • +
  • If not found there you can try to install those by yourself.
  • +
+
+

Tip Python packages

+
    +
  • Try Conda first directly on Bianca and PyPI on Rackham.
  • +
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.
  • +
  • If you want to keep the number of files down, use PyPI (pip).
  • +
  • Also, it is easy to get conflicting environments if you use both the Python module and Conda in parallel.
  • +
+
+

Conda

+
    +
  • We have mirrored all major Conda repositories directly on UPPMAX, on both Rackham and Bianca. These are updated every third day.
  • +
+
+

Available Conda channels

+
    +
  • bioconda
  • +
  • biocore
  • +
  • conda-forge
  • +
  • dranew
  • +
  • free
  • +
  • main
  • +
  • pro
  • +
  • qiime2
  • +
  • r
  • +
  • r2018.11
  • +
  • scilifelab-lts
  • +
+
+ +
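
Because these channels are mirrored locally, a normal Conda workflow works on both Rackham and Bianca. A minimal sketch (the environment name and package are just examples, and the conda module name is an assumption):

+
module load conda
+conda create --name my-env samtools
+source activate my-env
+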

Python packages with pip

+ +
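
On Rackham, a user-level install with pip is often the simplest route. A minimal sketch (the package name is just an example):

+
module load python
+pip install --user biopython
+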

R packages

+
    +
  • On UPPMAX the module R_packages is an omnibus package library containing almost all packages in the CRAN and BioConductor repositories.
  • +
  • +

    As of 2023-05-31, there were a total of 23100 R packages installed in R_packages/4.2.1.

    +
      +
    • A total of 23109 packages were available in CRAN and BioConductor, and 23000 of these were installed in R_packages/4.2.1
    • +
    • The additional 100 R packages available in this module were installed from the CRAN/BioConductor archives, or were hosted on github, gitlab or elsewhere.
    • +
    +
  • +
  • +

    Installing R packages

    +
  • +
+
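
To use this library, load the module and the packages are directly available. A minimal sketch (the package name is just an example, and this assumes the module also puts Rscript on the path):

+
module load R_packages/4.2.1
+Rscript -e 'library(ggplot2)'
+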

Julia packages

+
    +
  • At UPPMAX there is a central library with installed packages.
  • +
  • This is good, especially when working on Bianca, since you then do not need to install via the wharf.
  • +
  • +

    A selection of the Julia packages and libraries installed on UPPMAX are:

    +
    CSV
    +CUDA
    +MPI
    +Distributed
    +IJulia
    +Plots
    +PyPlot
    +DataFrames
    +
    +
  • +
  • +

    Installing julia packages

    +
  • +
+
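
To check that a centrally installed package is visible, a minimal sketch (assuming the module is simply named julia):

+
module load julia
+julia -e 'using CSV; println("CSV loaded")'
+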

Containers

+
+

Info

+
    +
  • Containers let you install programs without needing to think about the computer environment, like
      +
    • operating system
    • +
    • dependencies (libraries and other programs) with correct versions
    • +
    +
  • +
  • Everything is included
  • +
  • Drawbacks
      +
    • you also install things that may already be installed
    • +
    • therefore, probably more disk space is needed
    • +
    +
  • +
+
+

Singularity

+

See the UPPMAX Singularity user guide:

+ +
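
As a minimal sketch of the workflow (the image is just an example, and this assumes singularity is available on the path):

+
singularity pull docker://ubuntu:22.04
+singularity exec ubuntu_22.04.sif cat /etc/os-release
+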

Docker

+

Docker will unfortunately not work on the clusters, since it requires root permission.

+

However, it is possible to convert a Docker script to a Singularity +container.

+

Spack

+
    +
  • The UPPMAX staff already has other ways to install most software applications.
  • +
  • Please use Spack only if other ways to install your tool are not possible or very difficult, e.g. when it requires very many dependencies and is not available through, e.g., EasyBuild.
  • +
  • UPPMAX Spack user guide
  • +
+

Own development

+ +

Run own scripts or programs

+

Unless your script or program is in the active PATH, you run it by its full path, or as ./<file> if you are in the same directory. For example, see the sketch below.

+
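
A minimal sketch (the file name is hypothetical):

+
chmod +x my_script.sh                 # make the script executable
+./my_script.sh                        # run it from the current directory
+$HOME/bin/my_script.sh                # or run it via its full path
+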

Summary

+
+

Keypoints

+
    +
  • You now have an overview of the procedures for installing packages/libraries and tools on Bianca through the wharf.
  • +
  • If you feel uncomfortable, or think that many users would benefit from the software, ask support to install it.
  • +
+
diff --git a/software/intel_advisor/index.html b/software/intel_advisor/index.html
Intel Advisor - UPPMAX Documentation

Intel Advisor

+

There are multiple profilers +available on UPPMAX. +This page describes Intel Advisor.

+

Intel Advisor is a broad set of tools +with a focus on performance analysis +of Intel-compiled code.

+

Intel's performance analysis suite can probably answer +any question you have about the performance of your code, +including MPI and OpenMP code.

+

In order to use Advisor, do the following:

+
module load intel-oneapi advisor
+
+

Make sure you have a graphical connection +through SSH X-forwarding or ThinLinc, +then run Advisor graphically like this:

+
advixe-gui
+
diff --git a/software/intel_vtune/index.html b/software/intel_vtune/index.html
Intel VTune - UPPMAX Documentation

Intel VTune

+

There are multiple profilers +available on UPPMAX. +This page describes Intel VTune.

+

Intel VTune is a broad set of tools +with a focus on performance improvement +of Intel-compiled code.

+

Intel's performance analysis suite can probably answer +any question you have about the performance of your code, +including MPI and OpenMP code.

+

VTune is focused on choosing optimization techniques that will yield good results, +whereas Amplifier is more broadly aimed at performance analysis.

+

In order to use VTune, do the following:

+
module load intel-oneapi vtune
+
+

Make sure you have a graphical connection +through SSH X-forwarding or ThinLinc, +then run VTune graphically like this:

+
vtune-gui
+
diff --git a/software/interactive/index.html b/software/interactive/index.html
interactive - UPPMAX Documentation

diff --git a/software/ipython/index.html b/software/ipython/index.html
IPython - UPPMAX Documentation

IPython

+

IPython is a console program that extends +the regular Python interpreter: +among other things, one can directly run scripts and re-use output.

+
+Want to see a video? +

Here are some videos on IPython:

+ +
+

After loading a Python module, you also have the IPython Python command shell available.

+
+Forgot how to load a Python module? +

See the UPPMAX page about Python here.

+
+
+What is a Python command shell? +

In computing, a shell is a program around something; +for example, Bash is a shell around a UNIX-like operating system.

+

In computing, a command shell means that the shell +is a command-line shell, i.e. text only.

+

A Python command shell, hence, is a text-only program +around Python.

+
+

Start the IPython command shell by typing:

+
ipython
+
+

or (for explicit Python 3):

+
ipython3
+
+

The ipython3 prompt looks like this:

+
[sven@rackham1 ~]$ ipython
+Python 3.11.4 (main, Aug  7 2023, 16:05:58) [GCC 12.2.0]
+Type 'copyright', 'credits' or 'license' for more information
+IPython 8.14.0 -- An enhanced Interactive Python. Type '?' for help.
+
+In [1]:
+
+

IPython allows one to write code interactively.

+

For example, in IPython, type:

+
print('Hello, world!')
+
+

and IPython will show the result of that line of code.

+

IPython can interact with your file system.

+
+How does IPython interact with the file system? +

For example, within IPython, running ...

+

ls

+

... displays a list of files in your current working folder +in the same way as Bash's ls.

+

The Python interpreter will give an error if you do the same.

+
+

IPython has an auto-complete triggered by Tab.

+
+How do I get auto-complete? +

As an example, writing this line of code in IPython ...

+
s = 'Hello, world!'
+
+

... and press enter. Now a variable called s will hold some text.

+

Now type ...

+
s.
+
+

and press Tab. You will see a menu of things you can do with that string. +Hold Tab to scroll through the many options.

+
+

IPython can show graphics.

+
+How do I get IPython to show graphics? +

In IPython, run this code line-by-line:

+
import matplotlib.pyplot as plt
+plt.plot([1, 2, 3, 4])
+plt.show()
+
+

(or as a one-liner: import matplotlib.pyplot as plt; plt.plot([1, 2, 3, 4]); plt.show())

+

You will see a window appear:

+

A window with the plot

+

You will only see a window appear if you have logged in to Rackham with +SSH X forwarding enabled.

+

Spoiler to login: ssh -X sven@rackham.uppmax.uu.se.

+

Spoiler to confirm: run xeyes.

+
+

IPython can directly run scripts.

+
+How do I get IPython to directly run scripts? +

In IPython, run:

+
run [filename]
+
+

where [filename] is the name of a file, for example:

+
run my_script.py
+
+

IPython will run the script and remember variables, functions and classes +created in that script.

+
diff --git a/software/javac/index.html b/software/javac/index.html
javac - UPPMAX Documentation

diff --git a/software/javac_compile_java/index.html b/software/javac_compile_java/index.html
Compile Java programs using javac - UPPMAX Documentation

Compile Java programs using javac

+

javac is a Java compiler.

+

This page describes how to compile Java code using javac.

+

Procedure

+

1. Load the Java module

+

Before compiling a Java program, the java +module has to be loaded. +To load the java module, enter the command:

+
module load java
+
+

2. Create a Java source file

+

Create and write a Java source file called hello_world.java:

+
nano hello_world.java
+
+

In nano, write the Java program as follows:

+
class hello_world
+{
+  public static void main(String[] args)
+  {
+    System.out.println("hello, world");
+  }
+}
+
+

3. Compile the source file

+

To compile, enter the command:

+
javac hello_world.java
+
+

4. Run

+

To run, enter:

+
java hello_world
+
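
Output:

+
hello, world
+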
diff --git a/software/jobinfo/index.html b/software/jobinfo/index.html
jobinfo - UPPMAX Documentation

jobinfo

+

jobinfo is a tool that shows the Slurm job queue on an UPPMAX cluster: both running and waiting jobs.

+
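
A minimal usage sketch (the -u flag and the user name are assumptions, not verified here):

+
jobinfo -u sven
+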

What do the fields PRIORITY and REASON mean in "jobinfo" output?

+
+For staff +

IG: Running jobs FAQ/Your priority...

+
+

Initial priority, at submit time

+

One of the columns in "jobinfo" output is named PRIORITY. The queue is sorted on priority, i.e. normally the job with the highest priority starts first, so this is an important parameter.

+

When you submit a job at UPPMAX, it gets an initial priority. Normally this is 100000, but some jobs start at a priority of 200000 or more:

+
    +
  • On a limited number of nodes, a group of people gets a higher priority, e.g. because they have funded those nodes.
  • +
  • Jobs that have asked for the interactive priority, with the flag "--qos=interact". This is for one-node jobs with a timelimit of at most 12 hours.
  • +
  • Jobs that have asked for the short-job priority, with the flag "--qos=short". This is for jobs of from one to four nodes, with a timelimit of at most 15 minutes.
  • +
+

When your project has overdrafted its 30-day running core hour allocation, the jobs within your project get a low initial priority of 70000 or less. These jobs are named bonus jobs. Instead of being disallowed from running, they are allowed to start, if there are free resources, when all higher-priority jobs have started. For each additional 10000 core hours that the project overdrafts its allocation, the priority is lowered by another 10000. The bottom value is 10000, i.e. a bonus job can start queuing with any of the following priorities, depending on how big the overdraft is: 70000, 60000, 50000, 40000, 30000, 20000, or 10000.

+

For every minute waiting in queue, a job gets a priority increase of approximately one, up to a waiting time of 14 days.

+

Next, the waiting time is described for each kind of job: high-priority jobs, bonus jobs, and normal jobs.

+

High-priority job

+

A job that gets a high priority, i.e. a priority higher than 210000, already at submit time will probably start quickly.

+

The priority value will slowly increase, for each minute passing, until the job starts.

+

Bonus job

+

A bonus job gets a low priority already at submit time and may have to wait a long time before starting. It is very difficult to estimate the waiting time, because all new high-priority and normal jobs will have a higher priority.

+

At night or during the next weekend, this job may be lucky and start. If it waits long enough, the monthly allocation of the project is no longer overdrafted, and the job is automatically converted to a normal job.

+

The priority value will slowly increase, for each minute passing, until the job starts.

+

Once the job has started, it will be treated like any other job.

+

Normal job

+

A normal job, starting at priority 100000, increases slowly in priority and may eventually start at a priority a little above 100000.

+

But more likely, something else will happen to it before that: It will be elevated to a higher starting priority: 190000. At the same time it loses the extra priority it accumulated while waiting at the priority 100000 level.

+

Only normal jobs will be elevated like this, and only one job or a few jobs for each user may be elevated at the same time.

+

The reason for the elevated level is to give each user a fair chance to start at least one job within a reasonable time, even if other users have thousands of jobs already waiting in the queue. The job start time will then not depend mainly on the number of jobs that are waiting, but instead on the number of other users that are waiting.

+

At least one job for each user is permitted to wait at the elevated level. Up to 64 jobs for each user are permitted there, if they are very small. Every five minutes the system tries to elevate more jobs, and every five minutes each old, elevated job gets five additional priority points.

+

Once the job has been elevated, its START_TIME approximations will be much more accurate. The main risk for a later start is that someone submits new, high-priority jobs. On the other hand, running jobs usually terminate earlier than their timelimit suggests.

+

Here is a detailed description of how jobs are picked for elevation:

+
    +
  • Jobs are picked strictly in order of priority.
  • +
  • A job is not elevated if its timelimit does not allow it to finish before the next planned maintenance stop.
  • +
  • At least one job per user is elevated, regardless of size and regardless of the two limitations mentioned below in this list.
  • +
  • The elevated jobs of a user must not together ask for more than 64 cores.
  • +
  • The elevated jobs of a user must not together ask for more than 2688 core hours, i.e. 112 core days.
  • +
+

How does Slurm decide what job to start next?

+

When there are free nodes, an approximate model of Slurm's behaviour is this:

+
    +
  • Step 1: Can the job in position one start now?
  • +
  • Step 2: If it can, remove it from the queue, start it, and continue with step 1.
  • +
  • Step 3: If it cannot, look at the next job.
  • +
  • Step 4: Can it start now, without risking that the jobs before it in the queue get a later START_TIME approximation?
  • +
  • Step 5: If it can, remove it from the queue, start it, recalculate what nodes are free, look at the next job, and continue with step 4.
  • +
  • Step 6: If it cannot, look at the next job, and continue with step 4.
  • +
+

As soon as a new job is submitted and as soon as a job finishes, Slurm restarts with step 1, so most of the time only jobs at the top of the queue are tested for the possibility to start. As a side effect of this restart behaviour, START_TIME approximations are normally not calculated for all jobs.
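You can ask Slurm to show the START_TIME estimates it has calculated so far; pending jobs for which no estimate has been calculated show N/A (squeue is a standard Slurm command, assumed available on the UPPMAX clusters):

squeue --start -u $USER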

+

More about other jobinfo columns for waiting jobs

+

Until now, we have looked into the PRIORITY and USER columns. Let us now look at some of the others, for waiting jobs:

+
    +
  • JOBID: This is the best way to identify a job uniquely. If you succeed in submitting a job, it gets a jobid. The jobids of your finished jobs can be found with the finishedjobinfo command (see the example after this list).
  • +
  • POS: This is jobinfo's numbering of the lines, after sorting with PRIORITY as the first key and JOBID as the second. It is an approximation of the queue position.
  • +
  • PARTITION: A Slurm partition is a set of compute nodes, together with some rules about how jobs must be handled, if they ask for this partition. An UPPMAX cluster normally sports the "devel", "core" and "node" partitions.
  • +
  • NAME: This is the job name, specified at submission time with the "-J" or "--job-name" flag. This name can help you to keep track of what the job was meant to do.
  • +
  • ACCOUNT: The specified project name, to keep track of how many core hours each project has needed. The projinfo command sums up those core hours.
  • +
  • ST: Means status. Status "PD" means pending (waiting), status "R" means running, status "CG" means completing (the job has finished, but the clean-up after the job is not finished yet).
  • +
  • START_TIME: An estimate of when the job will start, if all jobs run until the end of their timelimit. You can also make guesses about when nodes get free by looking at the TIME_LEFT column of running jobs. Slurm computes START_TIME only when it needs the information, i.e. you cannot find that information for all jobs.
  • +
  • TIME_LEFT: The specified timelimit for the job. When getting near to a maintenance stop, long jobs cannot start, because they may not finish before the maintenance stop starts.
  • +
  • REASON: There are a number of possible reasons for a job not to have started yet. Some are explained here:
      +
    • AssociationJobLimit: probably means that the job will never start, because it breaks some system limit, set by UPPMAX.
    • +
    • BeginTime: means that the user has specified that the job must not start until some specified time in the future.
    • +
    • Dependency: means that the job will not start until some other job(s) has (have) finished.
    • +
    • JobHeldAdmin: means that a systems administrator has put the job on hold.
    • +
    • JobHeldUser: means that the job owner has put the job on hold.
    • +
    • None: might mean that Slurm has not yet had time to put a reason there.
    • +
    • Priority, ReqNodeNotAvail, and Resources: are the normal reasons for waiting jobs, meaning that your job cannot start yet, because free nodes for your job have not been found.
    • +
    • QOSResourceLimit: means that the job has asked for a QOS and that some limit for that QOS has been reached. The job cannot start as long as the limit is still reached.
    • +
    • QOSJobLimit: probably means that the job can never start, because it breaks some system limit, set by UPPMAX.
    • +
    +
  • +
  • FEATURES: There are quite a few of these and some are explained here:
      +
    • null: means that no special features have been asked for.
    • +
    • fat: means that a fat node (a node with a more-than-standard -- for this cluster -- amount of memory) is needed.
    • +
    • thin: means that a standard (i.e. non-fat) node must be used, and this feature is automatically set for most jobs with no memory requirements and a high timelimit, so the job will not unnecessarily hog a fat node for a long time.
    • +
    +
  • +
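For example, to find the jobids of finished jobs in your project (as mentioned in the JOBID item above; myproj is a placeholder for your own project name):

finishedjobinfo -q myproj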

jobstats

+

jobstats plot

+
+

An example plot produced by jobstats

+
+

jobstats is an UPPMAX tool to enable discovery of resource usage for jobs submitted to the Slurm job queue.

+

This page describes:

+ +
    +
  • jobstats --plot: How to use jobstats --plot to see resource use in a graphical plot
  • +
  • Efficient use: How to use your resources efficiently
  • +
  • Examples: Examples of inefficient resource use plots
  • +
  • Other jobstats functionality
      +
    • Using jobstats --help
    • +
    +
  • +
+

jobstats --plot

+

With the --plot (or -p) option, +a plot is produced showing the resource use per node +for a job that completed successfully and took longer than 5 minutes.

+

There are many ways to use --plot; a minimal use could be:

+
jobstats --plot [job_id]
+
+

for example:

+
jobstats --plot 12345678
+
+

The plot will be produced in the current folder, with the name [cluster_name]-[project_name]-[user_name]-[jobid].png, for example rackham-uppmax1234-sven-876543.png. Use any image viewer, e.g. eog, to see it.
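For example, to open the plot named above (assuming you are in the folder where it was produced):

eog rackham-uppmax1234-sven-876543.png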

+

Each plot shows:

+
    +
  • detailed information in the title.
  • +
  • CPU usage in blue
  • +
  • current memory usage in solid black
  • +
  • overall memory usage in dotted black (if available)
  • +
+

Interpreting a plot

+

For example, in this plot:

+

jobstats showing a single-node job

+
    +
  • the title shows the detailed info. milou is the name of a former UPPMAX cluster.
  • +
  • CPU usage in blue, which is around 1000%, which is the equivalent of 10 cores + being used 100%
  • +
  • current memory usage in solid black, which is around 20 GB (left-side vertical + axis) or a little bit less than 1 core (right-side vertical axis)
  • +
  • overall memory usage in dotted black, which is around 340 GB (left-side vertical + axis) or a little bit less than 11 cores (right-side vertical axis)
  • +
+

For jobs running on multiple nodes, plots have multiple columns:

+

jobstats showing a job that used two nodes

+

Some plots show suggestions in red:

| Text in plot | Description |
| --- | --- |
| nodes_overbooked : nodes booked : nodes used | More nodes were booked than used |
| overbooked : % used | The maximum percentage of booked cores and/or memory that was used (if < 80%) |
| !!half_overbooked | No more than one-half of both cores and memory of a node was used; consider booking half a node instead. |
| !!severely_overbooked | No more than one-quarter of both cores and memory of a node was used; examine your job requirements closely. |
| !!swap_used | Swap storage was used at any point within the job run |
| node_type_overbooked : type booked : type used | A fat node was requested that was larger than was needed. This flag may be produced spuriously if Slurm ran the job on a fat node when a fat node was not requested by the user. |
| cores_overbooked : cores booked : cores used | More cores were booked than used (if < 80%) |
| mem_overbooked : GB booked : GB used | More memory was available than was used (if < 25% and more than one core). |
| core_mem_overbooked : GB in used cores : GB used | Less memory was used than was available in the cores that were used (if < 50%). |
+

In this example plot, however, the setup is considered good enough.

+

Determine efficient use

+

To determine if you efficiently use UPPMAX resources, follow this chart:

+
flowchart TD
+  blue_line_close_to_top[CPU usage maximum above 80%?]
+  black_line_close_to_top[Memory usage maximum above 80%?]
+  can_decrease_number_of_cores[Can the number of cores be decreased?]
+  decrease_number_of_cores(Decrease the number of cores)
+  done(Done)
+  blue_line_close_to_top --> |yes| done
+  blue_line_close_to_top --> |no| black_line_close_to_top
+  black_line_close_to_top --> |yes| done
+  black_line_close_to_top --> |no| can_decrease_number_of_cores
+  can_decrease_number_of_cores --> |yes| decrease_number_of_cores
+  can_decrease_number_of_cores --> |no| done
+

If your use is not efficient, follow the strategy at efficient use below.

+

Efficient use

+

Here is a strategy to effectively use your UPPMAX resources:

+
flowchart TD
+  lower_limit_based_on_memory(Pick the number of cores to have enough memory)
+  limited_by_cpu(For that amount of cores, would runtime be limited by CPU?)
+  lower_limit_based_on_cpu(Increase the number of cores, so that on average, the right amount of CPUs is booked)
+
+  done(Use that amount of cores)
+
+  add_one(Increase the number of cores by one for safety)
+
+  lower_limit_based_on_memory --> limited_by_cpu
+  limited_by_cpu --> |no| add_one
+  limited_by_cpu --> |yes| lower_limit_based_on_cpu
+  lower_limit_based_on_cpu --> done
+  add_one --> done
+
+Why not look at CPU usage? +

Because CPU is more flexible.

+

For example, imagine a job with a short CPU spike that can be processed by 16 CPUs. If the memory of 1 core is enough, use 1 core: the spike will be turned into 100% CPU use (of that one core) for a somewhat longer duration.

+
+
+Need a worked-out example? +

jobstats showing a single-node job

+
+

Pick the number of cores to have enough memory

+
+

The dotted black line hits the right-hand vertical axis at 1070%. +This means that 11 cores (i.e. 1100%) would be enough for this job.

+
+

For that amount of cores, would runtime be limited by CPU?

+
+

The answer is 'no'. Having 11 cores would mean that most of the time only 10 are used. Only during the CPU spike at the end is the runtime limited by CPU. This short time has only a minor impact on the total runtime.

+
+

Increase the number of cores by one for safety

+
+

This means booking 12 cores is recommended.

+
+
+Need another worked-out example? +

jobstats showing a single-node job

+
+

Pick the number of cores to have enough memory

+
+

The dotted black line hits the right-hand vertical axis at 90%. +This means that 1 core (i.e. 100%) would be enough for this job.

+
+

For that amount of cores, would runtime be limited by CPU?

+
+

The answer is 'yes'. Having 1 core would mean that around half the time there is too little CPU power. This has a real effect on the runtime.

+
+

Increase the number of cores, so that on average the right amount of CPUs are booked

+
+

This is around 8 cores (800%), as with that amount of cores:

+
    +
  • half of the time, only 1 core is used out of the roughly 7 booked, that is about 6 too many
  • +
  • half of the time, the job could use 13 cores but has only about 7, that is about 6 too few
  • +
+

This is not an exact algorithm and all numbers from 2 to 9 cores +can be considered okay.

+
+

Sometimes, however, it is inevitable to use resources inefficiently; see the examples below.

+
+

No queue is possible

+

If everyone followed these guidelines, there would probably not even be a queue most of the time!

+
+

Examples

+

Here are some examples of how inefficient jobs can look +and what you can do to make them more efficient.

+

Inefficient job example 1: booking too many cores

+

jobstats showing a single-node job

+

Here booking 5 cores is considered okay.

+
+

Pick the number of cores to have enough memory

+
+

The dotted black line hits the right-hand vertical axis at 390%. +This means that 4 cores (i.e. 400%) would be enough for this job.

+
+

For that amount of cores, would runtime be limited by CPU?

+
+

The answer is 'no'. Having 4 cores would mean that most of the time only 1 is used. Only during some CPU spikes is the runtime limited by CPU. This short time has only a minor impact on the total runtime.

+
+

Increase the number of cores by one for safety

+
+

This means booking 5 cores is recommended.

+

Inefficient job example 2: booking too many cores

+

jobstats showing a single-node job

+

This is one of the grayer areas: +booking 2-9 cores is all considered reasonable.

+
+

Pick the number of cores to have enough memory

+
+

The dotted black line hits the right-hand vertical axis at 90%. +This means that 1 core (i.e. 100%) would be enough for this job.

+
+

For that amount of cores, would runtime be limited by CPU?

+
+

The answer is 'yes'. Having 1 core would mean that around half the time there is too little CPU power. This has a real effect on the runtime.

+
+

Increase the number of cores, so that on average the right amount of CPUs are booked

+
+

This is around 8 cores (800%), as with that amount of cores:

+
    +
  • half of the time, only 1 core is used out of the roughly 7 booked, that is about 6 too many
  • +
  • half of the time, the job could use 13 cores but has only about 7, that is about 6 too few
  • +
+

This is not an exact algorithm and all numbers from 2 to 9 cores +can be considered okay.

+

Inefficient job example 3

+

jobstats showing a single-node job

+

Here booking 6 cores is considered okay.

+
+

Pick the number of cores to have enough memory

+
+

The dotted black line hits the right-hand vertical axis at 40%. +This means that 1 core (i.e. 100%) would be enough for this job.

+
+

For that amount of cores, would runtime be limited by CPU?

+
+

The answer is 'yes'. Having 1 core would +mean that most of the time our run is limited by CPU power. +This has an impact on the runtime speed.

+
+

Increase the number of cores, so that on average the right amount of CPUs are booked

+
+

This is around 6 cores (600%), as with that amount of cores:

+
    +
  • most of the time, 6 out of 6 booked cores are used, that is 0 too many
  • +
  • only rarely, there is a little spike up or a bigger spike down
  • +
+

There are no signs of anything slowing them down, as the line is very even.

+

This job should either have been booked with 6 cores, or the program should be told to use all 8 cores.

+

Inefficient job example 4: slowdown

+

jobstats showing a single-node job

+

This job is using almost all of the cores it has booked, +but there seems to be something holding them back. +The uneven blue curve tells us that something is slowing down the analysis, +and it's not by a constant amount.

+

Usually this is how it looks when the filesystem is the cause of a slowdown. Since the load on the filesystem is constantly changing, the speed at which a job can read data from it will also change.

+

This job should copy all the files it will be working with to the node's local hard drive before running the analysis, and thereby not be affected by the speed of the shared filesystem.

+

Please see the guide How to use the node's own hard drive for analysis for more information.

+

You basically just add 2 more commands to your script file +and the problem should be solved.
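A minimal sketch of those two extra commands, assuming your input lives in a project folder and that $SNIC_TMP points to the node-local scratch disk; the paths and program name are placeholders:

cp /proj/myproj/data/input.dat $SNIC_TMP/    # 1: copy the input to the node-local disk
cd $SNIC_TMP
my_analysis input.dat                        # the analysis now reads from the local disk
cp results.dat /proj/myproj/results/         # 2: copy the results back to the shared filesystem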

+

Inefficient job example 5

+

jobstats showing a single-node job

+

This job has the same problem as the example above, +but in a more extreme way.

+

It's not uncommon that people book whole nodes out of habit and only run single-threaded programs that use almost no memory. This job is a bit special in that it is being run on a high-memory node, as you can see on the left Y-axis, which goes up to 256 GB RAM. A normal node on Milou only has 128 GB. These high-memory nodes are only bookable if you book the whole node, so you can't book just a few cores on them. That means that if you need 130 GB RAM and your program is single-threaded, your only option is to book a whole high-memory node. The job will look really inefficient, but it's the only way to do it on our system. The example in the plot does not fall into this category though, as it uses only ~15 GB of RAM, which you could get by booking 2-3 normal cores.
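For a job like the one plotted, a minimal sketch of a leaner booking (project name, timelimit and script name are placeholders) could be:

sbatch -A myproj -p core -n 3 -t 10:00:00 my_analysis.sh
# 3 cores on a standard Milou node give about 24 GB RAM (roughly 8 GB per core),
# comfortably above the ~15 GB this job actually used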

+

jobstats --help

+

Use jobstats --help to see the help of jobstats:

+
jobstats --help
+
+
+What does the output look like? +
USAGE
+-----
+
+    jobstats  -p [-r] [-M cluster] [ jobid [ jobid ... ] | -A project | - ] [other options]
+
+Discover jobstats for the specified job(s) on the specified cluster.  Cluster
+defaults to the value of $SNIC_RESOURCE ('rackham' on the current system) if
+not specified.
+
+With the -p/--plot option, a plot is produced from the jobstats for each
+jobid.  Plots contain one panel per booked node showing CPU (blue) and memory
+usage (black) traces and include text lines indicating the job number, cluster,
+end time and duration, user, project, job name, and usage flags (more on those
+below).  For memory usage, one or two traces are shown: a solid black line
+shows instantaneous memory usage, and a dotted black line shows overall maximum
+memory usage if this information is available.
+
+Plots are saved to the current directory with the name
+
+    cluster-project-user-jobid.png
+
+Note that not all jobs will produce jobstats files, particularly if the job was
+cancelled or ran for less than 5 minutes.  Also, if a job booked nodes
+inefficiently by not using nodes it asked for, jobstats files will not be
+available for the booked but unused nodes.
+
+JOBSTATS DISCOVERY
+------------------
+
+There are five modes for discovery, depending on what the user provides on the
+command line: (1) discovery by job number for a completed job; (2) discovery by
+job number for a currently running job; (3) discovery by node and job number,
+for a completed or running job; (4) discovery by project; or (5) discovery via
+information provided on 'stdin'.  In each of the example command lines below, the
+-p/--plot option requests that plots of job resource usage are created.
+
+Mode 1:  jobstats -p jobid1 jobid2 jobid3
+-------
+Job numbers valid on the cluster.  finishedjobinfo is used to determine further
+information for each job.  If multiple queries are expected, it might be quicker
+to run finishedjobinfo yourself separately, see Mode 5 below.  See Mode 2 for a
+currently running job.
+
+Mode 2:  jobstats -p -r jobid1 jobid2 jobid3
+-------
+Job numbers of jobs currently running on the cluster.  The Slurm squeue tool is
+used to determine further information for each running job.
+
+Mode 3:  jobstats -p -n m15,m16 jobid
+-------
+finishedjobinfo is *not* called and Uppmax's stored job statistics files are
+discovered directly.  If you know which node(s) your job ran on or which nodes
+you are interested in, this will be much faster than Mode 1.
+
+Mode 4:  jobstats -p -A project
+-------
+When providing a project name that is valid for the cluster, finishedjobinfo is
+used to determine further information on jobs run within the project.  As for
+Mode 1, this can be rather slow.  Furthermore only finishedjobinfo defaults for
+time span etc. are used for job discovery.  If multiple queries are expected or
+additional finishedjobinfo options are desired, see Mode 5 below.
+
+Mode 5:  finishedjobinfo project | jobstats - -p
+-------
+Accept input on stdin formatted like finishedjobinfo output.  The long form of
+this option is '--stdin'.  This mode can be especially useful if multiple
+queries of the same job information are expected.  In this case, save the
+output of a single comprehensive finishedjobinfo query, and extract the parts
+of interest and present them to this script on stdin.  For example, to produce
+analyses of all completed jobs in a project during the current calendar year,
+and produce separate tarballs analysing all jobs and providing jobstats plots
+for each user during this same period:
+
+     finishedjobinfo -y project > proj-year.txt
+     grep 'jobstat=COMPLETED' proj-year.txt | jobstats - > all-completed-jobs.txt
+     grep 'username=user1' proj-year.txt | jobstats - -p > user1-jobs.txt
+     tar czf user1-jobs.tar.gz user1-jobs.txt *-project-user1-*.png
+     grep 'username=user2' proj-year.txt | jobstats - -p > user2-jobs.txt
+     tar czf user2-jobs.tar.gz user2-jobs.txt *-project-user2-*.png
+     ...
+
+COMMAND-LINE OPTIONS
+--------------------
+
+    -p | --plot        Produce CPU and memory usage plot for each jobid
+
+    -r | --running     Jobids are for jobs currently running on the cluster. The
+                       Slurm squeue tool is used to discover further information
+                       for the running jobs, and the rightmost extent of the plot
+                       produced will reflect the scheduled end time of the job.
+
+    -A project         Project valid on the cluster.  finishedjobinfo is used to
+                       discover jobs for the project.  See further comments
+                       under 'Mode 4' above.
+
+    -M cluster         Cluster on which jobs were run [default current cluster]
+
+    -n node[,node...]  Cluster node(s) on which the job was run.  If specified,
+                       then the finishedjobinfo script is not run and discovery
+                       is restricted to only the specified nodes.  Nodes can be
+                       specified as a comma-separated list of complete node
+                       names, or using the finishedjobinfo syntax:
+                             m78,m90,m91,m92,m100  or  m[78,90-92,100]
+                       Nonsensical results will occur if the syntaxes are mixed.
+
+    - | --stdin        Accept input on stdin formatted like finishedjobinfo
+                       output.  The short form of this option is a single dash
+                       '-'.
+
+    -m | --memory      Always include memory usage flags in output.  Default
+                       behaviour is to include memory usage flags only if CPU
+                       usage flags are also present.
+
+    -v | --verbose     Be wordy when describing flag values.
+
+    -b | --big-plot    Produce 'big plot' with double the usual dimensions.
+                       This implies '-p/--plot'.
+
+    -q | --quiet       Do not produce table output
+
+    -Q | --Quick       Run finishedjobinfo with the -q option, which is slightly
+                       faster but does not include Slurm's record of maximum
+                       memory used. With this option, memory usage analyses can
+                       only rely upon what is reported at 5-minute intervals,
+                       and the trace of maximum memory used (dotted black line)
+                       is not produced.
+
+    --no-extended      Do *not* use extended jobstats information [default is to use it]
+
+    --paging           Include PAGE_IN/PAGE_OUT statistics from extended jobstats [experimental]
+
+    -d                 Produce a header for table output
+
+    --version          Produce version of this script and plot_jobstats, then exit
+
+    -h | -?            Produce brief help
+
+    --help             Produce detailed help information
+
+The following command-line options are generally useful only for Uppmax staff.
+
+    --cpu-free FLOAT   Maximum CPU busy percentage for the CPU to count as
+                       free at that sampling time.  Default is 3 %.
+    -x directory       Directory prefix to use for jobstats files.  Default is
+                       '/sw/share/slurm', and directory structure is (depending on whether
+                       --no-extended is used):
+
+                       <prefix>/<cluster>/extended_uppmax_jobstats/<node>/<jobid>
+                       <prefix>/<cluster>/uppmax_jobstats/<node>/<jobid>
+
+    -X directory       Hard directory prefix to use for jobstats files.
+                       Jobstats files are assumed available directly:
+                           '<hard-prefix>/<jobid>'
+    --no-multijobs     Run finishedjobinfo separately for each jobid, rather
+                       than all jobids bundled into one -j option (for debugging)
+    -f file            finishedjobinfo script [default is '/sw/uppmax/bin/finishedjobinfo']
+    -P file            plot_jobstats script [default is '/sw/uppmax/bin/plot_jobstats']
+
+
+FURTHER DETAILS
+---------------
+
+This script produces two types of output.  If the -p/--plot command line option
+is provided, a plot is created of core and memory usage across the life of the
+job.  The name of the file produced has the format:
+
+    cluster-project-user-jobid.png
+
+Unless the -q/--quiet option is provided, a table is also produced containing
+lines with the following tab-separated fields:
+
+  jobid cluster jobstate user project endtime runtime flags booked maxmem cores node[,node...] jobstats[,jobstats...]
+
+Field contents:
+
+  jobid    : Job ID
+  cluster  : Cluster on which the job was run
+  jobstate : End status of the job: COMPLETED, RUNNING, FAILED, TIMEOUT, CANCELLED
+  user     : Username that submitted the job
+  project  : Project account under which the job was run
+  endtime  : End time of the job (with -n/--node, this is '.')
+  runtime  : Runtime of the job (with -n/--node, this is '.')
+  flags    : Flags indicating various types of resource underutilizations
+  booked   : Number of booked cores (with -n/--node, this is '.')
+  maxmem   : Maximum memory used as reported by Slurm (if unavailable, this is '.')
+  cores    : Number of cores represented in the discovered jobstats files.
+  node     : Node(s) booked for the job, expanded into individual node names,
+             separated by commas; if no nodes were found, this is '.'.
+             The nodes for which jobstats files are available are listed first.
+  jobstats : jobstats files for the nodes, in the same order the nodes are
+             listed, separated by commas; if no jobstats files were discovered,
+             this is '.'
+
+If -r/--running was used, an additional field is present:
+
+  timelimit_minutes : The time limit of the job in minutes
+
+
+FLAGS
+-----
+
+An important part of jobstats output are usage flags.  These provide indications
+that booked resources -- processor cores or memory -- might have been
+underused.
+
+In both plot and table output, flags are a comma-separated list of cautions
+regarding core and/or memory underutilisation.  The appearance of a flag does
+not necessarily mean that resources were used incorrectly.  It depends upon the
+tools being used and the contents of the Slurm header, and also depends upon
+the job profile.  Because usage information is gathered every 5 minutes, higher
+transient usage of cores or memory may not be captured in the log files.
+
+Flags most likely to represent real overbooking of resources are
+nodes_overbooked, overbooked, !!half_overbooked, !!severely_overbooked, and
+!!swap_used.
+
+For multinode jobs, flags other than nodes_overbooked are determined based only
+on the usage of the first node.  Multinode jobs require careful analysis so as
+to not waste resources unnecessarily, and it is a common mistake among
+beginning Uppmax users to book multiple nodes and run tools that cannot use
+more than the first.  In this case, nodes_overbooked will appear.
+
+Some flags have a threshold below which they appear.  The default format is
+generally 'flag:value-booked:value-used'.
+
+  nodes_overbooked : nodes booked : nodes used
+      More nodes were booked than used
+  overbooked : % used (if < 80%)
+      The maximum percentage of booked cores and/or memory that was used
+  !!half_overbooked
+      No more than 1/2 of both cores and memory of a node was used; consider booking
+      half a node instead.
+  !!severely_overbooked
+      No more than 1/4 of both cores and memory of a node was used, examine your job
+      requirements closely.
+  !!swap_used
+      Swap storage was used at any point within the job run
+  node_type_overbooked : type booked : type used
+      A fat node was requested that was larger than was needed.  This flag may be
+      produced spuriously if Slurm ran the job on a fat node when a fat node was not
+      requested by the user.
+  cores_overbooked : cores booked : cores used
+      More cores were booked than used (if < 80%)
+  mem_overbooked : GB booked : GB used
+      More memory was available than was used (if < 25% and more than one core).
+  core_mem_overbooked : GB in used cores : GB used
+      Less memory was used than was available in the cores that were used (if < 50%).
+
+By default no flags are indicated for jobs with memory-only cautions except for
+swap usage, because it is common for jobs to heavily use processor cores
+without using a sizable fraction of memory.  Use the -m/--memory option to
+include flags for memory underutilisation when those would be the only flags
+produced.
+
+More verbose flags are output with the -v/--verbose option.
+
+
+Script:   /sw/uppmax/bin/jobstats
+Version:  2023-11-16
+
+
+

Modes of jobstats discovery

+

There are five modes for discovery, +depending on what the user provides on the command line:

+
    +
  • (1) discovery by job number for a completed job;
  • +
  • (2) discovery by job number for a currently running job;
  • +
  • (3) discovery by node and job number, for a completed or running job;
  • +
  • (4) discovery by project
  • +
  • (5) discovery via information provided on stdin.
  • +
+

In the example command lines below, the -p/--plot option requests that plots of job resource usage are created.

+

jobstats discovery mode 1: discovery by job number for a completed job

+

Discovery by job number for a completed job:

+
jobstats --plot jobid1 jobid2 jobid3
+
+

The job numbers must be valid on the cluster. finishedjobinfo is used to determine further information for each job. This can be rather slow, and a message asking for your patience is printed for each job.

+

If multiple queries are expected, it would be quicker to run finishedjobinfo yourself separately; see Mode 5 below. See Mode 2 for a currently running job.

+

jobstats discovery mode 2: discovery by job number for a currently running job

+

Discovery by job number for a currently running job.

+
jobstats --plot -r jobid1 jobid2 jobid3
+
+

Job numbers of jobs currently running on the cluster. The Slurm squeue tool is used to determine further information for each running job.

+

jobstats discovery mode 3: discovery by node and job number, for a completed or running job

+

Discovery by node and job number, for a completed or running job.

+
jobstats --plot -n m15,m16 jobid
+
+

finishedjobinfo is not called and +UPPMAX's stored job statistics files for the cluster of interest are discovered directly. +If you know which node(s) your job ran on +or which nodes you are interested in, this will be much faster than Mode 1.

+

jobstats discovery mode 4: discovery by project

+

Discovery by project.

+
jobstats --plot -A project
+
+

When providing a project name that is valid for the cluster, +finishedjobinfo is used +to determine further information on jobs run within the project. +As for Mode 1, this can be rather slow, +and a message asking for your patience is printed.

+

Furthermore only finishedjobinfo defaults +for time span etc. are used for job discovery. +If multiple queries are expected +or additional finishedjobinfo options are desired, +see Mode 5 below.

+

jobstats discovery mode 5: discovery via information provided on stdin

+

Discovery via information provided on stdin:

+
+What is stdin? +

stdin is an abbreviation for 'Standard input', +see the Wikipedia page on 'stdin'

+
+
finishedjobinfo -q project | jobstats - --plot
+
+

Accept input on stdin formatted like finishedjobinfo output. +Note the single dash (-) option given to jobstats; +the long form of this option is --stdin. +This mode can be especially useful if +multiple queries of the same job information are expected. +In this case, save the output of a single comprehensive finishedjobinfo query, +and extract the parts of interest and present them to this script on stdin.

+

For example, to produce analyses of all completed jobs in a project +during the current calendar year, and produce separate tarballs +analysing all jobs and providing jobstats plots for each user during this same period:

+
project=myproj
+finishedjobinfo -q -y ${project} > ${project}-year.txt
+grep 'jobstat=COMPLETED' ${project}-year.txt | jobstats - > ${project}-completed-jobs.txt
+for u in user1 user2 user3 ; do
+    grep "username=${u}" ${project}-year.txt | jobstats - --plot > ${u}-jobs.txt
+    tar czf ${u}-jobs.tar.gz ${u}-jobs.txt *-${project}-${u}-*.png
+done
+

Julia user guide

+

Julia installations

+

There is no system-installed Julia on the clusters. Therefore you need to load Julia with the module system. Different versions of Julia are available via the module system on Rackham, Snowy, and Bianca. Some pre-installed packages are also available via the module.

+

At the time of writing we have the following modules:

+
[user@rackham1 ~]$ module avail julia
+------------------------------------------------------
+julia:
+------------------------------------------------------
+Versions:
+        julia/1.0.5_LTS
+        julia/1.1.1
+        julia/1.4.2
+        julia/1.6.1
+        julia/1.6.3
+        julia/1.6.7_LTS
+        julia/1.7.2
+        julia/1.8.5
+        julia/1.9.1
+        julia/1.9.3 (Default)
+
+
    +
  • "LTS" stands for Long term support.
  • +
+

To load a specific version of Julia into your environment, type e.g.

+
module load julia/1.6.7_LTS
+
+

Doing:

+
module load julia
+
+

will give you the default version (1.9.3), often the latest version.

+

A good and important suggestion is that you always specify a certain version. This makes it possible to reproduce your work, an important key in research!
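To check which version you actually got after loading, you can ask Julia itself:

julia --version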

+

You can run a Julia script in the shell with:

+
julia example_script.jl
+
+

After loading the appropriate modules for Julia, you will have access to the read-eval-print-loop (REPL) command line by typing julia.

+
julia
+
+

You will get a prompt like this:

+
julia>
+
+

Julia has different modes; the one mentioned above is the so-called Julian mode, where one can execute commands. How to access the other modes is described in the following paragraphs. Once you are done with your work in any of the modes, you can return to the Julian mode by pressing the backspace key.

+

While in the Julian mode, you can enter the shell mode by typing ;:

+
julia> ;
+shell> pwd
+/current-folder-path
+
+

This will allow you to use Linux commands. Notice that the availability of these commands depends on the OS; for instance, on Windows it will depend on the terminal that you have installed and whether it is visible to the Julia installation.

+

Another mode available in Julia is the package manager mode; it can be accessed by typing ] in the Julian mode:

+
julia>]
+(v1.8) pkg>
+
+

This will make your interaction with the package manager Pkg easier, for instance, instead of typing the complete name of Pkg commands such as Pkg.status() in the Julian mode, you can just type status in the package mode.

+

The last mode is the help mode; you can enter it from the Julian one by typing ?. Then you may type some string about which you need more information:

+
julia> ?
+help?> ans
+search: ans transpose transcode contains expanduser instances MathConstants readlines LinearIndices leading_ones leading_zeros
+
+ans
+
+A variable referring to the last computed value, automatically set at the interactive prompt.
+
+
+

Info

+

Pressing backspace will get you back to the Julian mode.

+
+
+

Info

+
+

Exit with <Ctrl-D> or exit().

+
+

See

+

More detailed information about the modes in Julia can be found here: https://docs.julialang.org/en/v1/stdlib/REPL/

+
+

Introduction

+

Julia is according to https://julialang.org/:

+
    +
  • Fast
  • +
  • Dynamic
  • +
  • Reproducible
  • +
  • Composable
  • +
  • General
  • +
  • Open source
  • +
+

Documentation for version 1.8.

+

Julia discussions

+

Packages

+

Some packages are pre-installed. That means that they are available also on Bianca. These include:

+
    +
  • "BenchmarkTools"
  • +
  • "CSV"
  • +
  • "CUDA"
  • +
  • "DataFrames"
  • +
  • "Distributed"
  • +
  • "DistributedArrays"
  • +
  • "Gadfly"
  • +
  • "IJulia"
  • +
  • "MPI"
  • +
  • "Plots"
  • +
  • "PlotlyJS"
  • +
  • "PyPlot"
  • +
  • all "standard" libraries.
  • +
+

This list will be extended as you, the users, request more packages.

+

You may inspect the present "central library" by typing, in the Julia shell:

+
using Pkg
+Pkg.activate(DEPOT_PATH[2]*"/environments/v1.8");     #change version accordingly
+Pkg.status()
+Pkg.activate(DEPOT_PATH[1]*"/environments/v1.8");     #to return to user library
+
+

Packages are imported or loaded by the commands import and using, respectively. The difference is shown here. Or briefly:

+

To use module functions, use import Module to import the module, and Module.fn(x) to use the functions. +Alternatively, using Module will import all exported Module functions into the current namespace.

+

Use centrally installed packages the first time

+

You may have to build a package the first time you run it. Julia will in that case ask you to do so. Then:

+
julia> using Pkg
+julia> Pkg.activate(DEPOT_PATH[2]*"/environments/v1.9");      #change version accordingly
+julia> Pkg.build(<package_name>)
+
+

How to install personal packages

+

You may ignore the pre-installed packages. They are there mainly for Bianca users, but may also help you save some disk space! If you ignore them, you can skip the next step.

+

Check if packages are installed centrally

+

To make sure that the package is not already installed, type in Julia:

+
julia> using Pkg
+julia> Pkg.activate(DEPOT_PATH[2]*"/environments/v1.8");  #change version accordingly
+julia> Pkg.status()
+
+

To go back to your own personal packages:

+
julia> Pkg.activate(DEPOT_PATH[1]*"/environments/v1.8");
+julia> Pkg.status()
+
+

You can load (using/import) ANY package from both the local and the central installation, irrespective of which environment you activate. However, the setup is such that your own package is prioritized if there are similar names.

+

Start an installation locally

+

To install personal packages, first make sure that you are in your local environment. Type within Julia:

+
     Pkg.activate(DEPOT_PATH[1]*"/environments/v1.8");
+     Pkg.add("<package_name>")
+
+

This will install the package under the path ~/.julia/packages/. Then you can load it by just doing using/import <package_name>:

+
      using <package_name>
+
+

You can also activate a "package prompt" in julia with ']':

+
(@v1.8) pkg> add <package name>
+
+

For installing specific versions specify with <package name>@<X.Y.Z>.
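As a sketch, the same thing can be done non-interactively from the shell; the package name and version here are only an illustration:

julia -e 'using Pkg; Pkg.add(PackageSpec(name="CSV", version="0.10.11"))'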

+

After adding, you may be asked to precompile or build. Do so according to the instructions given on screen. Otherwise, the first time you import or use the package, Julia may start a precompilation that takes from a few seconds up to several minutes.

+

Exit with <backspace>:

+
julia>
+
+

Own packages on Bianca

+

You can make an installation on Rackham and then use the wharf to copy it over to your ~/.julia/ directory.

+

Otherwise, send an email to support@uppmax.uu.se and we'll help you.

+

Running IJulia from Jupyter notebook

+

Like for Python, it is possible to run Julia in a notebook, i.e. in a web interface with the possibility of inline figures and debugging. An easy way to do this is to load the python module as well. In the shell:

+
module load julia/1.8.5
+module load python/3.10.8
+julia
+
+

In Julia:

+

using IJulia
+notebook(dir="</path/to/work/dir/>")
+
+

A Firefox session will start with the Jupyter notebook interface.

+

If not, you may have to build IJulia the first time with Pkg.build("IJulia"). Since "IJulia" is pre-installed centrally on UPPMAX, you must activate the central environment by following the steps below. This should only be needed the first time:

+
> using Pkg
+> Pkg.activate(DEPOT_PATH[2]*"/environments/v1.8");
+> Pkg.build("IJulia")
+> notebook(dir="</path/to/work/dir/>")
+
+

This builds the package also locally before starting the notebook. If this is not done, Jupyter will not find the Julia kernel of that version. With notebook(dir="</path/to/work/dir/>", detached=true) the notebook will not be killed when you exit your Julia REPL session in the terminal.

+

How to run parallel jobs

+

There are several packages available for Julia that let you run parallel jobs. Some of them are only able to run on one node, while others try to leverage several machines. You'll find an introduction here.

+

Run interactively on compute node

+

Always run parallel jobs only on the compute nodes. This is an example with 4 cores on Rackham:

+
$ interactive -A <proj> -n 4 -t 3:00:00
+Running interactively at UPPMAX
+
+

Slurm user guide

+

Threading

+

Threading divides up your work among a number of cores within a node. The threads share their memory. Below is an example from within Julia. First, in the shell type:

+
export JULIA_NUM_THREADS=4
+julia
+
+

in Julia:

+
using Base.Threads
+nthreads()
+a = zeros(10)
+@threads for i = 1:10
+    a[i] = Threads.threadid()
+end
+
+

Distributed computing

+

Distributed processing uses individual processes with individual memory that communicate with each other. In this case, data movement and communication are explicit. Julia supports various forms of distributed computing.

+
    +
  • A native master-worker system based on remote procedure calls: Distributed.jl
  • +
  • MPI through MPI.jl : a Julia wrapper for the MPI protocol, see further down.
  • +
  • DistributedArrays.jl: distribute an array among workers
  • +
+

If choosing between distributed and MPI, distributed is easier to program, whereas MPI may be more suitable for multi-node applications.

+

For more detailed info, please consult the manuals for distributed computing and Julia MPI.

+

Master-Worker model

+

We need to launch Julia with

+
julia -p 4
+
+

then inside Julia you can check

+
nprocs()
+workers()
+
+

which should print 5 and [2,3,4,5]. Why 5, you ask? Because "worker 1" is the "boss". And bosses don't work.

+

As you can see, you can run distributed computing directly from the julia shell.

+

Batch example

+

Julia script hello_world_distributed.jl:

+
using Distributed
+# launch worker processes: one per Slurm task, minus one for the master process
+num_workers = parse(Int, ENV["SLURM_NTASKS"]) - 1
+addprocs(num_workers)
+println("Number of cores: ", nprocs())
+println("Number of workers: ", nworkers())
+# each worker gets its id, process id and hostname
+for i in workers()
+    id, pid, host = fetch(@spawnat i (myid(), getpid(), gethostname()))
+    println(id, " " , pid, " ", host)
+end
+# remove the workers
+for i in workers()
+    rmprocs(i)
+end
+
+
Batch script job_distributed.slurm:
+
#!/bin/bash
+#SBATCH -A <proj>
+#SBATCH -p devel
+#SBATCH --job-name=distrib_jl     # create a short name for your job
+#SBATCH --nodes=1                # node count
+#SBATCH --ntasks=20              # total number of tasks across all nodes
+#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)
+#SBATCH --time=00:01:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+#SBATCH --mail-user=<email>
+module load julia/1.8.5
+julia hello_world_distributed.jl
+
+

Put the job in the queue:

+
sbatch job_distributed.slurm
+
+

Interactive example

+
salloc -A <proj> -p node -N 1 -n 10 -t 1:0:0
+julia hello_world_distributed.jl
+
+

MPI

+

The Threaded and Distributed packages are included in the Base installation. However, in order to use MPI with Julia you will need to follow the next steps (only the first time):

+
    +
  • Load the tool chain which contains an MPI library
  • +
+

For julia/1.6.3 and earlier:

+
module load gcc/9.3.0 openmpi/3.1.5
+
+

For julia/1.6.7_LTS & 1.7.2:

+
module load gcc/10.3.0 openmpi/3.1.6
+
+

For julia/1.8.5:

+
module load gcc/11.3.0 openmpi/4.1.3
+
+
    +
  • Load Julia
  • +
+
ml julia/1.8.5   # or other
+
+
    +
  • Start Julia on the command line
  • +
+
julia
+
+
    +
  • Change to package mode and add the MPI package
  • +
+
(v1.8) pkg> add MPI
+
+
    +
  • In the julian mode run these commands:
  • +
+
julia> using MPI
+julia> MPI.install_mpiexecjl()
+[ Info: Installing `mpiexecjl` to `~/.julia/bin`...
+[ Info: Done!
+
+
    +
  • Add the installed mpiexecjl wrapper to your path on the Linux command line
  • +
+
export PATH=~/.julia/bin:$PATH
+
+
    +
  • Now the wrapper should be available on the command line
  • +
+

Because of how MPI works, we need to explicitly write our code into a file, juliaMPI.jl:

+
import MPI
+using Printf  # the @printf macro lives in the Printf standard library
+MPI.Init()
+comm = MPI.COMM_WORLD
+MPI.Barrier(comm)
+root = 0
+r = MPI.Comm_rank(comm)
+sr = MPI.Reduce(r, MPI.SUM, root, comm)
+if MPI.Comm_rank(comm) == root
+    @printf("sum of ranks: %s\n", sr)
+end
+MPI.Finalize()
+
+

You can execute your code as in an interactive session with several cores (at least 3 in this case):

+
module load gcc/11.3.0 openmpi/4.1.3
+mpiexecjl -np 3 julia juliaMPI.jl
+
+

A batch script, job_MPI.slurm, should include a "module load gcc/XXX openmpi/XXX"

+
#!/bin/bash
+#SBATCH -A <proj>
+#SBATCH -p devel
+#SBATCH --job-name=MPI_jl        # create a short name for your job
+#SBATCH --nodes=1                # node count
+#SBATCH --ntasks=20              # total number of tasks across all nodes
+#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)
+#SBATCH --time=00:05:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+#SBATCH --mail-user=<email>
+module load julia/1.8.5
+module load gcc/11.3.0 openmpi/4.1.3
+export PATH=~/.julia/bin:$PATH
+mpiexecjl -n 20 julia juliaMPI.jl
+
+
Run with:
+
sbatch job_MPI.slurm
+
+

See the MPI.jl examples for more input!

+

GPU

+

Example Julia script, juliaCUDA.jl:

+
using CUDA, Test
+N = 2^20
+x_d = CUDA.fill(1.0f0, N)
+y_d = CUDA.fill(2.0f0, N)
+y_d .+= x_d
+@test all(Array(y_d) .== 3.0f0)
+println("Success")
+
+

Batch script juliaGPU.slurm, note settings for Bianca vs. Snowy:

+
#!/bin/bash
+#SBATCH -A <proj-id>
+#SBATCH -M <snowy OR bianca>
+#SBATCH -p node
+#SBATCH -C gpu   #NB: Only for Bianca
+#SBATCH -N 1
+#SBATCH --job-name=juliaGPU         # create a short name for your job
+#SBATCH --gpus-per-node=<1 OR 2>             # number of gpus per node (Bianca 2, Snowy 1)
+#SBATCH --time=00:15:00          # total run time limit (HH:MM:SS)
+#SBATCH --qos=short              # if test run t<15 min
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+#SBATCH --mail-user=<email>
+module purge
+module load julia/1.8.5          # system CUDA works as of today
+julia juliaCUDA.jl
+
+
Put the job in the queue:
+
sbatch juliaGPU.slurm
+
+

Interactive session with GPU

+

On Snowy, getting 1 CPU and 1 GPU:

+
interactive -A <proj> -n 1 -M snowy --gres=gpu:1  -t 3:00:00
+
+

On Bianca, getting 2 CPUs and 1 GPU:

+
interactive -A <proj> -n 2 -C gpu --gres=gpu:1 -t 01:10:00
+
+
    +
  • Wait until the session has started. The Julia modules available there then include, e.g.:
  • +
+
julia/1.7.2
+julia/1.8.5 (Default)
+

Jupyter

+

Jupyter on Rackham

+

There are multiple IDEs on the UPPMAX clusters, among others Jupyter. Here we describe how to run Jupyter.

+

Jupyter is an IDE specialized for the Python programming language.

+
+

Info

+
    +
  • You can run Python in a Jupyter notebook, i.e. in a web interface with the possibility of inline figures and debugging.
  • +
  • Jupyter-lab is installed in the python>=3.10.8 module
  • +
+
+
+

Warning

+

Always start Jupyter in a ThinLinc session +and preferably in an interactive session.

+
+

Introduction

+

Jupyter is a web application that allows literate programming for Python. That is, Jupyter allows one to create documents where Python code is shown and run, its results are shown, and everything is surrounded by written text (e.g. English).

+

Additionally, Jupyter allows sharing files and hence includes a file manager.

+

Jupyter is:

+
    +
  • started and running on a server, for example, an interactive node
  • +
  • displayed in a web browser, such as firefox.
  • +
+

Jupyter can be slow when used over the remote desktop webpage (e.g. https://rackham-gui.uppmax.uu.se).

+
    +
  • +

    For UPPMAX, one can use a locally installed ThinLinc client to speed up Jupyter. See the UPPMAX documentation on ThinLinc (https://www.uppmax.uu.se/support/user-guides/thinlinc-graphical-connection-guide) on how to install the ThinLinc client locally.

    +
  • +
  • +

    It is also possible to run Jupyter with a local browser to speed up the graphics but still enjoy the benefits of many CPUs and much RAM.

    + +
  • +
+

How to start Jupyter

See the separate guides: Jupyter on Rackham, Jupyter on Bianca, or Jupyter in local browser.

Run Jupyter in a virtual environment (venv)

+

You could also use jupyter- (lab or notebook) in a venv virtual environment.

+

If you decide to use the --system-site-packages configuration, you will get jupyter from the Python module you created your virtual environment with. However, you won't find your locally installed packages in that Jupyter session. To solve this, force-reinstall jupyter within the virtual environment (option -I):

+
pip install -I jupyter
+
+

and run it as above.

+

Be sure to start the kernel with the virtual environment name, like "project A", and not "Python 3 (ipykernel)".
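A minimal sketch of the whole venv workflow, assuming the python/3.11.8 module and a hypothetical environment called projectA (the ipykernel step, which registers the kernel under the venv's name, is an assumption and not from the steps above):

module load python/3.11.8
python -m venv --system-site-packages ~/venvs/projectA
source ~/venvs/projectA/bin/activate
pip install -I jupyter                                # force a venv-local jupyter
python -m ipykernel install --user --name projectA    # assumed step: register the kernel as "projectA"
jupyter-notebook --ip 0.0.0.0 --no-browser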


Jupyter in local browser

+

To increase the speed of graphics, it is possible to run Jupyter on a compute node but display the graphics on your local computer. That will speed up the interaction with plotting figures and GUI management.

+

This is possible for the Rackham and Snowy clusters.

+
+

Warning

+

This feature is not possible on Bianca.

+
+

Step 1: Login to an UPPMAX cluster

+
    +
  • Using ThinLinc or a terminal does not matter.
  • +
+

Step 2: start an interactive session

+

Start a terminal. Within that terminal, start an interactive session from the login node (change to the correct NAISS project ID).

+

For Rackham

+
interactive -A <naiss-project-id>  -t 4:00:00
+
+

For Snowy

+
interactive -M snowy -A <naiss-project-id>  -t 4:00:00
+
+

Step 3: start Jupyter in the interactive session

+

Within your terminal with the interactive session, load a modern Python module:

+
module load python/3.11.8
+
+

Then, start jupyter-notebook (or jupyter-lab):

+
jupyter-notebook --ip 0.0.0.0 --no-browser
+
+

Leave this terminal open.

+

The terminal will display multiple URLs.

+

Copy one of these, like:

+
http://r486:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2
+
+

Step 4: On own computer

+
    +
  • If you use ssh to connect to Rackham, you need to forward the port of the interactive node to your local computer.
      +
    • On Linux or Mac this is done by running the command below in another terminal. Make sure you change the ports if they are not at the default 8888.
    • +
    +
  • +
+
ssh -L 8888:r486:8888 username@rackham.uppmax.uu.se
+
+
    +
  • Replace r486 if you got another node
  • +
  • If you use Windows it may be better to do this in the PowerShell instead of a WSL2 terminal.
  • +
  • If you use PuTTY - you need to change the settings in "Tunnels" accordingly (could be done for the current connection as well).
  • +
+

putty

+

SSH port forwarding

+

On your computer, open the address you got, but replace r486 with localhost or 127.0.0.1, i.e.

+
http://localhost:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2
+
+

or

+
http://127.0.0.1:8888/?token=5c3aeee9fbfc75f7a11c4a64b2b5b7ec49622231388241c2
+
+

This should bring up the Jupyter interface on your computer, while all calculations and files stay on the Rackham compute node.

+

Back to jupyter page


Jupyter on Bianca

+


+

There are multiple IDEs on the UPPMAX clusters, among others Jupyter. Here we describe how to run Jupyter on Bianca.

+

Jupyter is an IDE specialized for +the Python programming language.

+

Procedure

+
+Prefer a video? +

This procedure is also demonstrated in this YouTube video.

+
+

1. Get within SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start the Bianca remote desktop environment

+
+Forgot how to start Bianca's remote desktop environment? +

See the 'Logging in to Bianca' page.

+
+

3. Start an interactive session

+

Within the Bianca remote desktop environment, start a terminal. +Within that terminal, +start an interactive node:

+
interactive -A [project_number] -t 8:00:00
+
+

Where [project_number] is your +UPPMAX project, for example:

+
interactive -A sens2016001 -t 8:00:00
+
+
+What is my UPPMAX project number? +

An easy answer that is probably true:

+

The one you used to log in, which is part of your prompt. For example, in the prompt below, the project is sens2016001.

+
[sven@sens2016001-bianca sven]$
+
+
+

4. Load a Python module

+

Within the terminal of the interactive session, +load a Python module

+
module load python/3.11.4
+
+
+Forgot what the module system is? +

See the UPPMAX pages on the module system here.

+
+
+Can I use other Python modules? +

Yes, you can use any Python module version from python/3.10.8 +onwards.

+
+

5. Start the Jupyter notebook

+

Still within the terminal of the interactive session, +start a notebook like this:

+
jupyter-notebook --ip 0.0.0.0 --no-browser
+
+

or jupyter lab:

+
jupyter-lab --ip 0.0.0.0 --no-browser
+
+

Jupyter will show one or more URLs in the terminal, +which you will need in the next step.

+

6. Browse to the Jupyter notebook

+

In the remote desktop environment on Bianca, start Firefox. +Point Firefox to one of the URLs from the Jupyter output.

+
+Can I start Firefox from the terminal too? +

Yes, in another terminal, one can use:

+
firefox [URL]
+
+

where [URL] is a URL produced by Jupyter, for example:

+
firefox http://127.0.0.1:8889/tree?token=7c305e62f7dacf65d74a4b966e2851987479ad0a258de34f
+
+
\ No newline at end of file
diff --git a/software/jupyter_on_rackham/index.html b/software/jupyter_on_rackham/index.html
new file mode 100644
index 000000000..e932f32d1
--- /dev/null
+++ b/software/jupyter_on_rackham/index.html
@@ -0,0 +1,3189 @@


Jupyter on Rackham

+

There are multiple IDEs on the UPPMAX clusters, +among others Jupyter. +Here we describe how to run Jupyter +on Rackham.

+

Jupyter is an IDE specialized for +the Python programming language.

+

Procedure

+
+Prefer a video? +

This procedure is also demonstrated in this YouTube video.

+
+

1. Start a Rackham remote desktop environment

+

This can be either:

+ +

2. Start an interactive session

+

Within the Rackham remote desktop environment, start a terminal. +Within that terminal, +start an interactive node:

+
interactive -A [project_number] -t 8:00:00
+
+

Where [project_number] is your +UPPMAX project, for example:

+
interactive -A sens2016001 -t 8:00:00
+
+
+What is my UPPMAX project number? +

See the UPPMAX documentation on how to see your UPPMAX projects

+
+

3. Load a Python module

+

Within the terminal of the interactive session, +load a Python module:

+
module load python/3.11.4
+
+
+Forgot what the module system is? +

See the UPPMAX pages on the module system here.

+
+
+Can I use other Python modules? +

Yes, you can use any Python module version from python/3.10.8 +onwards.

+
+

4. Start the Jupyter notebook

+

Still within the terminal of the interactive session, +start a notebook like this:

+
jupyter-notebook --ip 0.0.0.0 --no-browser
+
+

or jupyter lab:

+
jupyter-lab --ip 0.0.0.0 --no-browser
+
+

Jupyter will show one or more URLs in the terminal, +which you will need in the next step.

+

5. Browse to the Jupyter notebook

+

In the remote desktop environment on Rackham, start Firefox. +Point Firefox to one of the URLs from the Jupyter output.

+
+Can I start Firefox from the terminal too? +

Yes, in another terminal, one can use:

+
firefox [URL]
+
+

where [URL] is a URL produced by Jupyter, for example:

+
firefox http://127.0.0.1:8889/tree?token=7c305e62f7dacf65d74a4b966e2851987479ad0a258de34f
+
+
\ No newline at end of file
diff --git a/software/jvarkit/index.html b/software/jvarkit/index.html
new file mode 100644
index 000000000..a71391119
--- /dev/null
+++ b/software/jvarkit/index.html
@@ -0,0 +1,3145 @@

jvarkit

+

According to the jvarkit GitHub repository, +jvarkit is 'Java utilities for Bioinformatics'.

+

jvarkit is unavailable in the UPPMAX module system.

+

Create a jvarkit Singularity container

+

To create a Singularity container +one can follow the procedure documented at 'Create a Singularity container from Docker_Hub'.

+

Spoiler:

+
sudo singularity build my_container.sif docker://lindenb/jvarkit:1b2aedf24
+
+

Note that 1b2aedf24 is the tag of the latest version of this Docker image. +In the future, there may be newer tags.

+

Usage:

+
./jvarkit.sif java -jar /opt/jvarkit/dist/jvarkit.jar --help
+
\ No newline at end of file
diff --git a/software/matlab/index.html b/software/matlab/index.html
new file mode 100644
index 000000000..a3ceaacbc
--- /dev/null
+++ b/software/matlab/index.html
@@ -0,0 +1,3647 @@

MATLAB user guide

+

The MATLAB module

+

MATLAB can be started only if you load the matlab module first. Most of the available official toolboxes are also available. At the time of this writing, our most recent installation is matlab/R2023b.

+

Doing:

+
module load matlab
+
+

will give you the latest version.

+

If you need a different version, check the availability by:

+
module avail matlab
+
+

To get started with MATLAB do (for instance):

+
module load matlab/R2023a
+matlab &
+
+

That will start a MATLAB session with the common GUI. Use & to run MATLAB in the background, keeping the terminal active for other work.

+

A good and important suggestion is that you always specify a particular version. This allows you to reproduce your work, which is essential in research!

+

First time, since May 13 2024

+
    +
  • +

    If you use MATLAB (any version) after May 13 2024, you have to do the following step to be able to use the full features for running parallel jobs.

    +
      +
    • The script only needs to be called once per version of MATLAB.
    • +
    • Note, however, that on Bianca this has to be done separately.
    • +
    +
  • +
  • +

    After logging into the cluster, configure MATLAB to run parallel jobs on the cluster by calling the shell script configCluster.sh.

    +
  • +
+
module load matlab/<version>
+configCluster.sh <project-ID>    # Note: no '-A'
+
+
    +
  • This will run a short configuration job in an interactive session.
  • +
  • Jobs will now default to the cluster rather than submit to the local machine.
  • +
  • It should look like this (example for Bianca)
  • +
+

configCluster.sh example output (screenshot)

+
    +
  • The session should exit automatically, but if not, you can end the session by
      +
    • exit
    • +
    • or <CTRL-C>
    • +
    +
  • +
  • When done, start Matlab as you usually do with matlab &.
  • +
+
+

Warning

+
    +
  • Do these steps for each matlab version you will use.
  • +
  • On Bianca you need to do this for each sens project that will use MATLAB, as well.
  • +
+
+
+

Tip

+
    +
  • Check the MATLAB version for which you have set the Slurm configuration by
  • +
+
ls -l .matlab/*/parallel.mlsettings
+
+
    +
  • Look for dates from May 2024 and onwards.
  • +
+
+

Introduction

+

Using MATLAB on the cluster enables you to utilize high performance facilities like:

+ +

See MathWorks's complete user guide

+

Some online tutorials and courses:

+ +

Running MATLAB

+
+

Warning

+
    +
  • It is possible to start MATLAB on the login node.
  • +
  • +

    This can be a way to work if you

    +
      +
    • work with just light analysis
    • +
    • just use Matlab to start batch jobs from the graphical user interface.
    • +
    +
  • +
  • +

    Then you should start matlab with just ONE thread

    +
  • +
+
matlab -singleCompThread &
+
+
+

Graphical user interface

+

To start MATLAB with its usual graphical interface (GUI), start it with:

+
matlab
+
+

If you will use significant resources, like processor or RAM, you should start an interactive session on a compute node. Use at least 2 cores (-n 2) when running interactively; otherwise MATLAB may not start. You can use several cores if you will do some parallel calculations (see the parallel section below). Example:

+
interactive -A <proj> -p core -n 2 -t 1:0:0
+
+

This example starts a session with 2 cores for a wall time of 1 hour.

+

MATLAB in terminal

+

For simple calculations it is possible to start just a command shell in your terminal:

+
matlab -nodisplay
+
+

Exit with 'exit'.

+

Run script from terminal or bash script

+

In order to run a script directly from terminal:

+
matlab -batch "run('<path/to/script.m>')" | tail -n +2
+
+

List all ways to run/start MATLAB:

+
matlab -h
+
+

ThinLinc

+

You may get the best of the MATLAB graphics by running it in the ThinLinc environment.

+ +

You may want to confer our UPPMAX ThinLinc user guide.

+

How to run parallel jobs

+

How to run parallel jobs for the first time, since May 13 2024

+
    +
  • If you use MATLAB (any version) after May 13 2024, you have to do the following step to be able to use the full features for running parallel jobs.
      +
    • The script only needs to be called once per version of MATLAB.
    • +
    • Note, however, that on Bianca this has to be done separately.
    • +
    +
  • +
  • After logging into the cluster, configure MATLAB to run parallel jobs on the cluster by calling the shell script configCluster.sh.
  • +
+
module load matlab
+configCluster.sh <project-ID>    # Note: no '-A'
+
+
    +
  • This will run a short configuration job in an interactive session, closing itself when done.
  • +
  • Jobs will now default to the cluster rather than submit to the local machine.
  • +
+

Two MATLAB commands

+

Two commands in MATLAB are important to make your code parallel:

+
    +
  • parfor will distribute your "for loop" among several workers (cores)
  • +
  • parfeval runs a section or a function on workers in the background (see the sketch after this list)
  • +
+
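
A minimal sketch of parfeval (the anonymous squaring function is only an illustration):

>> p = parpool(4);                    % open a pool with 4 workers
>> F = parfeval(@(x) x.^2, 1, 1:10);  % run on a worker in the background; 1 = number of outputs
>> result = fetchOutputs(F)           % blocks until the result is ready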

Use interactive matlab

+

First, start an interactive session on a calculation node with, for instance, 8 cores:

+
interactive -A <project> -p core -n 8 -t 3:00:00
+
+

In MATLAB open a parallel pool of 8 local workers:

+
>> p = parpool(8)
+
+

What happens if you try to run the above command twice? You can't run multiple parallel pools at the same time. Query the number of workers in the parallel pool:

+
>> p.NumWorkers
+
+

gcp will "get current pool" and return a handle to it. If a pool has not already been started, it will create a new one first and then return the handle to it:

+
>> p = gcp
+
+

Shutdown the parallel pool:

+
>> delete(p)
+
+

The following checks whether a pool is open and, if so, deletes it:

+
>> delete(gcp('nocreate'))
+
+

This will delete a pool if it exists, but won't create one first if it doesn't already exist.

+

With parpool('local') or parcluster('local') you will use the settings of the 'local' profile. With parpool('local',20) you will get 20 cores, but otherwise the 'local' settings, like automatic shutdown after 30 minutes. +You can change your settings here: HOME > ENVIRONMENT > Parallel > Parallel preferences.

+

MATLAB Batch

+

With MATLAB you can e.g. submit jobs directly to our job queue scheduler, without having to use Slurm's commands directly. Let us first make two small functions. The first one, a little simpler, is saved in the file parallel_example.m:

+
    function t = parallel_example(nLoopIters, sleepTime)
+      t0 = tic;
+      parfor idx = 1:nLoopIters
+        A(idx) = idx;
+        pause(sleepTime);
+      end
+      t = toc(t0);
+
+

and the second, a little longer, saved in parallel_example_hvy.m:

+
    function t = parallel_example_hvy(nLoopIters, sleepTime)
+      t0 = tic;
+      ml = 'module list';
+      [status, cmdout] = system(ml);
+      parfor idx = 1:nLoopIters
+        A(idx) = idx;
+        for foo = 1:nLoopIters*sleepTime
+          A(idx) = A(idx) + A(idx);
+          A(idx) = A(idx)/3;
+        end
+      end
+      t = toc(t0);
+
+

Begin by running the command

+
>> configCluster %(on Bianca it will look a little different)
+
+

in the Matlab Command Window to choose a cluster configuration. Matlab will set up a configuration and will then print out some instructions, seen below. You can also set defaults that are read if you don't specify them. Go to HOME > ENVIRONMENT > Parallel > Parallel preferences.

+
       [1] rackham
+       [2] snowy
+    Select a cluster [1-2]: 1
+    >>
+    >> c = parcluster('rackham'); %on Bianca 'bianca Rxxxxx'
+    >> c.AdditionalProperties.AccountName = 'snic2021-X-YYY';
+    >> c.AdditionalProperties.QueueName = 'node';
+    >> c.AdditionalProperties.WallTime = '00:10:00';
+    >> c.saveProfile
+    >> job = c.batch(@parallel_example, 1, {90, 5}, 'pool', 19) %19 is for 20 cores. On Snowy and Bianca use 15.
+    >> job.wait
+    >> job.fetchOutputs{:}
+
+

Follow them. These inform you what is needed in your script or on the command line to run in parallel on the cluster. The line c.batch(@parallel_example, 1, {90, 5}, 'pool', 19) can be understood as: put the function parallel_example in the batch queue. The arguments to batch are:

+
    c.batch(function name, number of output arguments, {the inputs to the function}, 'pool', no of additional workers to the master)
+
+    c.batch(@parallel_example, 1 (t=toc(t0)), {nLoopIters=90, sleepTime=5}, 'pool', 19)
+
+

To see the output to screen from jobs, use job.Tasks.Diary. Output from the submitted function is fetched with 'fetchOutputs()'.

+

For jobs using several nodes (in this case 2) you may modify the call to:

+
    >> configCluster
+       [1] rackham
+       [2] snowy
+    Select a cluster [1-2]: 1
+    >>
+    >> c = parcluster('rackham'); %on Bianca 'bianca R<version>'
+    >> c.AdditionalProperties.AccountName = 'snic2021-X-YYY';
+    >> c.AdditionalProperties.QueueName = 'node';
+    >> c.AdditionalProperties.WallTime = '00:10:00';
+    >> c.saveProfile
+    >> job = c.batch(@parallel_example_hvy, 1, {1000, 1000000}, 'pool', 39)% 31 on Bianca or Snowy
+    >> job.wait
+    >> job.fetchOutputs{:}
+
+

where parallel_example_hvy.m is the script presented above.

+

For the moment jobs are hard coded to be node jobs. This means that if you request 21 tasks instead (20 + 1) you will get a 2 node job, but only 1 core will be used on the second node. In this case you'd obviously request 40 tasks (39 + 1) instead.

+

For more information about Matlab's Distributed Computing features please see Matlab's HPC Portal.

+

GPU

+

Running MATLAB with GPU is, as of now, only possible on the Snowy and Bianca clusters. Uppsala University affiliated staff and students with allocation on Snowy can use this resource.

+

Start an interactive session with at least 2 cores (otherwise MATLAB may not start). On Snowy, getting (for instance) 2 CPUs (-n 2) and 1 GPU:

+
interactive -A <proj> -n 2 -M snowy --gres=gpu:1  -t 3:00:00
+
+

On Bianca, getting 3 CPUs and 1 GPU:

+
interactive -A <proj> -n 3 -C gpu --gres=gpu:1 -t 01:10:00
+
+

Note that the wall time -t should be set to more than one hour, to not automatically put the job in the devel or devcore queue, which is not allowed for GPU jobs. Also check the GPU guide for Snowy at Using the GPU nodes on Snowy.

+

Load the MATLAB module and start matlab as usual (with &) in the new session. Then test if the GPU device is found by typing:

+
>> gpuDevice
+>> gpuDeviceCount
+
+

On Bianca you may get an error. Follow the instructions and you can run anyway. Example code:

+
>> A = gpuArray([1 0 1; -1 -2 0; 0 1 -1]);
+>> e = eig(A);
+
+
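
To copy a result back from GPU memory to the host workspace, you can use gather (a standard Parallel Computing Toolbox function); continuing the example above:

>> B = gather(e);   % e is copied back to an ordinary array in host memory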

For more information about GPU computing confer the MathWorks web about GPU computing.

+

Deep Learning with GPUs

+

For many functions in Deep Learning Toolbox, GPU support is automatic if you have a suitable GPU and Parallel Computing Toolbox™. You do not need to convert your data to gpuArray. The following is a non-exhaustive list of functions that, by default, run on the GPU if available.

+ +

Shell batch jobs

+

Sometimes when matlab scripts are part of workflows/pipelines it may be easier to work directly with the batch scripts.

+

Batch script example with 2 nodes (Rackham), matlab_submit.sh.

+
#!/bin/bash -l
+#SBATCH -A <proj>
+#SBATCH -p devel
+#SBATCH -N 2
+#SBATCH -n 40
+module load matlab/R2020b &> /dev/null
+srun -N 2 -n 40  matlab -batch "run('<path/to/m-script>')"
+
+

Run with

+
sbatch matlab_submit.sh
+
+

Common problems

+

Sometimes things do not work out.

+

As a first step, try removing local files:

+
rm -rf ~/.matlab
+
+

If the graphics are slow, try:

+
vglrun matlab -nosoftwareopengl
+
+

Unfortunately this only works from login nodes.

+

You may want to run MATLAB on a single thread; this often makes it work:

+
matlab -singleCompThread
+
+

Matlab Add-Ons

+

Matlab Add-ons

+

MATLAB client on the desktop

+

Guideline here

\ No newline at end of file
diff --git a/software/matlab_addons/index.html b/software/matlab_addons/index.html
new file mode 100644
index 000000000..1181f45df
--- /dev/null
+++ b/software/matlab_addons/index.html
@@ -0,0 +1,3331 @@

+

MATLAB Add-Ons

+
    +
  • Add-ons extend the capabilities of MATLAB® by providing additional functionality for specific tasks and applications, such as:
      +
    • connecting to hardware devices
    • +
    • additional algorithms
    • +
    • interactive apps
    • +
    +
  • +
  • Available from:
      +
    • MathWorks®
    • +
    • the global MATLAB user community
    • +
    +
  • +
  • Encompass a wide variety of resources
      +
    • products
    • +
    • apps
    • +
    • toolboxes
    • +
    • support packages
    • +
    +
  • +
  • More information from Mathworks
  • +
+
+
+

Learners should be able to

+
    +
  • navigate to toolboxes and Add-Ons
  • +
  • view Add-Ons and toolboxes
  • +
  • install and use Add-Ons
  • +
+
+
    +
  • Before going into installing Add-Ons, let's have some background on the MATLAB environment and ecosystem!
  • +
+

MATLAB Add-Ons manager

+

MATLAB toolbar (screenshot)

+
    +
  • +

    In the GUI, the Add-Ons manager can be selected from the menu at the top. The drop-down menu options allow users to:

    +
      +
    • Browse a library of Add-Ons to download. Note that some Add-Ons require a separate license.
    • +
    +

    Add-On Explorer (screenshot)

    +
      +
    • Manage Add-Ons already downloaded.
    • +
    +

    Add-On Manager (screenshot)

    +
      +
    • +

      Package user-generated code as a Toolbox or App

      +
    • +
    • +

      Get hardware-related support packages

      +
    • +
    +
  • +
  • +

    Here we will only focus on the first two options.

    +
  • +
+
+

Note

+

Note that many packages are already included in the Academic installation and license.

+
+

my products (screenshot)

+
+

Some toolboxes

+
    +
  • Matlab products
      +
    • Parallel Computing Toolbox
    • +
    • MATLAB Parallel Server
    • +
    • Deep Learning Toolbox
    • +
    • Statistics and Machine Learning Toolbox
    • +
    +
  • +
  • Simulink
      +
    • Stateflow
    • +
    • SimEvents
    • +
    • Simscape
    • +
    +
  • +
+
+
+

Some toolboxes provide GUIs for their tools (Apps)

+
    +
  • Matlab products
      +
    • Deep Network Designer - Design and visualize deep learning networks
    • +
    • Curve Fitter - Fit curves and surfaces to data
    • +
    • Deep Learning Toolbox
    • +
    • Statistics and Machine Learning Toolbox
    • +
    +
  • +
  • Simulink
      +
    • Stateflow
    • +
    • SimEvents
    • +
    • Simscape
    • +
    +
  • +
+

Apps tab (screenshot)

+
    +
  • We won't cover the usage of the toolboxes here!
  • +
+
+

Install Add-Ons

+
    +
  • Search in add-ons explorer and install.
  • +
+

Searching for Add-Ons (screenshot)

+
    +
  • +

    Installed Add-Ons end up in a local folder that is in the path, so they should be reachable wherever you are in the file tree.

    +
  • +
  • +

    ~/MATLAB Add-Ons

    +
  • +
  • +

    It's in the path, so it should be possible to run it directly if you don't need to run an installation file.

    +
  • +
  • +

    For more information about a specific support package install location, see the documentation for the package.

    +
  • +
+
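
If you prefer the command line, you can also list the add-ons MATLAB currently sees (matlab.addons.installedAddons is a standard function in recent MATLAB versions):

>> addons = matlab.addons.installedAddons   % returns a table with names, versions and identifiers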
+

Warning

+

To be able to install, you need to use the email address of a personal MathWorks account.

+
+
+

Seealso

+

You can install some Add-Ons manually using an installation file. This is useful in several situations:

+
    +
  • The add-on is not available for installation through the Add-On Explorer, for example, if you create a custom add-on yourself or receive one from someone else.
  • +
  • You downloaded the add-on from the Add-On Explorer without installing it.
  • +
  • You downloaded the add-on from the File Exchange at MATLAB Central™.
  • +
  • MathWorks page on getting Add-Ons
  • +
+
+
+

Demo

+
    +
  • Search for kalmanf
  • +
  • Click "Learning the Kalman Filter"
  • +
  • Look at the documentation
  • +
  • Test if the command works today:
  • +
+
  >> kalmanf
+  Unrecognized function or variable 'kalmanf'.
+
+
    +
  • OK, it is not there
  • +
  • Click "Add", and "Download and Add to path"
  • +
  • Type email address connected to your MathWorks account
  • +
  • Installation starts
  • +
  • It will end up in:
  • +
+
  $ tree MATLAB\ Add-Ons/
+  MATLAB\ Add-Ons/
+  └── Collections
+  |   └── Efficient\ GRIB1\ data\ reader
+  |       ├── core.28328
+  |       ├── license.txt
+  |       ├── readGRIB1.c
+  |       ├── readGRIB1.mexa64
+  |       └── resources
+  |           ├── addons_core.xml
+  |           ├── matlab_path_entries.xml
+  |           ├── metadata.xml
+  |           ├── previewImage.png
+  |           ├── readGRIB1.zip
+  |           └── screenshot.png
+  └── Functions
+      └── Learning\ the\ Kalman\ Filter
+          ├── kalmanf.m
+          └── resources
+              ├── addons_core.xml
+              ├── kalmanf.zip
+              ├── matlab_path_entries.xml
+              ├── metadata.xml
+              ├── previewImage.png
+              └── screenshot.png
+
+
    +
  • Evidently it is a function. Note that I already have something classified as Collections.
  • +
  • Now test:
  • +
+
  >> kalmanf()
+  'kalmanf' requires Learning the Kalman Filter version 1.0.0.0 to be enabled.
+
+
    +
  • OK. It is installed but may need some other things. Just an example!!
  • +
+
+
+

Keypoints

+
    +
  • Many Add-Ons, like toolboxes and packages, are available on the clusters
  • +
  • +

    You can view Add-Ons and toolboxes

    +
      +
    • It is all more or less graphical
    • +
    +
  • +
  • +

    To install Add-Ons

    +
      +
    • Search in Add-Ons explorer and install.
    • +
    • Ends up in local folder and is in the path so it should be reached wherever you are in the file tree.
    • +
    +
  • +
+
\ No newline at end of file
diff --git a/software/matlab_local/index.html b/software/matlab_local/index.html
new file mode 100644
index 000000000..fd7c205fd
--- /dev/null
+++ b/software/matlab_local/index.html
@@ -0,0 +1,3347 @@

MATLAB client on the desktop

+
+

Use your own computer's MATLAB

+
    +
  • Would you like to try running batch jobs on the Rackham or Snowy cluster, but use the faster graphics that you can achieve on your own computer?
  • +
  • Do you have all your work locally but sometimes need the cluster to do parallel runs?
  • +
  • UPPMAX offers this now.
  • +
+
+
+

Warning

+
    +
  • +

    This solution is possible only if:

    +
      +
    • you have an UPPMAX compute project
    • +
    • +

      a working MATLAB on your computer with one of the versions available on the cluster:

      +
    • +
    • +

      check with module avail matlab

      +
    • +
    • +

      Examples of the newest ones:

      +
        +
      • R2020b
      • +
      • R2022a
      • +
      • R2022b
      • +
      • R2023a
      • +
      • R2023b
      • +
      +
    • +
    +
  • +
+
+

Let's get started

+

The Rackham MATLAB support package can be found at uppsala.Desktop.zip.

+
    +
  • Download the ZIP file and start MATLAB locally.
  • +
  • The ZIP file should be unzipped in the location returned by calling:
  • +
+
>> userpath
+
+
    +
  • You can unzip from MATLAB's Command window.
  • +
  • Configure MATLAB to run parallel jobs on the cluster by calling configCluster. configCluster only needs to be called once per version of MATLAB.
  • +
+
>> configCluster
+Username on RACKHAM (e.g. jdoe):
+
+
    +
  • Type your Rackham user name.
  • +
  • As a result:
  • +
+
Complete.  Default cluster profile set to "Rackham R2022b".
+
+
+

Note

+
    +
  • To submit jobs to the local machine instead of the cluster, run the following:
  • +
+
>> % Get a handle to the local resources
+>> c = parcluster('local');
+
+
+

Configuring Slurm details

+

Prior to submitting the job, various parameters can be assigned, such as queue, e-mail, walltime, etc. The following is a partial list of parameters. See AdditionalProperties for the complete list. Only AccountName, Partition, MemUsage and WallTime are required.

+
>> % Get a handle to the cluster
+>> c = parcluster;
+
+c = 
+
+  Generic Cluster
+
+    Properties: 
+
+                      Profile: Rackham R2022b
+                     Modified: false
+                         Host: UUC-4GM8L33.user.uu.se
+                   NumWorkers: 100000
+                   NumThreads: 1
+
+        JobStorageLocation: <path to job outputs locally>
+         ClusterMatlabRoot: /sw/apps/matlab/x86_64/R2022b
+           OperatingSystem: unix
+
+
    +
  • Set some additional parameters related to Slurm on Rackham
  • +
+
>> % Specify the account
+>> c.AdditionalProperties.AccountName = 'naiss2024-22-1202';
+
+>> % Specify the wall time (e.g., 1 day, 5 hours, 30 minutes)
+>> c.AdditionalProperties.WallTime = '00:30:00';
+
+>> % Specify cores per node
+>> c.AdditionalProperties.ProcsPerNode = 20;
+
+[OPTIONAL]
+
+>> % Specify the partition
+>> c.AdditionalProperties.Partition = 'devcore';
+
+>> % Specify another cluster: 'snowy'
+>> c.AdditionalProperties.ClusterName='snowy'
+>> c.AdditionalProperties.ProcsPerNode = 16;
+
+>> % Specify number of GPUs
+>> c.AdditionalProperties.GPUsPerNode = 1;
+>> c.AdditionalProperties.GPUCard = 'gpu-card';
+
+
    +
  • Save the profile
  • +
+
>> c.saveProfile
+
+

To see the values of the current configuration options, display AdditionalProperties.

+
>> % To view current properties
+>> c.AdditionalProperties
+
+

Unset a value when no longer needed.

+
>> % Example Turn off email notifications
+>> c.AdditionalProperties.EmailAddress = '';
+>> c.saveProfile
+
+

Start job

+
    +
  • +

    Copy this script and paste in a new file parallel_example_local.m that you save in the working directory where you are (check with pwd in the Matlab Command Window).

    +
      +
    • The script is supposed to loop over sleepTime seconds of work nLoopIters times.
    • +
    • We will define the number of processes in the batch submit line.
    • +
    +
  • +
+
   function t = parallel_example_local(nLoopIters, sleepTime)
+   t0 = tic;
+   parfor idx = 1:nLoopIters
+      A(idx) = idx;
+      pause(sleepTime);
+   end
+   t = toc(t0);
+
+
>> job = c.batch(@parallel_example_local, 1, {16,1}, 'Pool',8,'CurrentFolder','.');
+
+- Submission to the cluster requires SSH credentials. 
+- You will be prompted for username and password or identity file (private key). 
+    - It will not ask again until you define a new cluster handle ``c`` or in next session.
+
+

MATLAB user credentials prompt (screenshot)

+

MATLAB password prompt (screenshot)

+
    +
  • Jobs will now default to the cluster rather than submit to the local machine.
  • +
+
>> job.State
+
+ans =
+
+    'running'
+
+
    +
  • You can run this several times until it gives:
  • +
+
>> job.State
+
+ans =
+
+    'finished'
+
+
    +
  • You can also watch the queue
  • +
+

MATLAB job monitor (screenshot)

+
    +
  • Or on Rackham (it really runs there!):
  • +
+
[bjornc2@rackham2 ~]$ squeue -u bjornc2
+        JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+        50827312   devcore MATLAB_R  bjornc2  R       2:20      1 r483
+
+
>> job.fetchOutputs{:}
+
+ans =
+
+    2.4853
+
+
    +
  • The script looped over 1 s work 16 times, but with 8 processes.
  • +
  • In an ideal world it would have taken 16 / 8 = 2 s. Now it took 2.5 s, with some "overhead".
  • +
+
+

Run on Snowy

+
>> c.AdditionalProperties.ClusterName='snowy'
+>> c.AdditionalProperties.ProcsPerNode = 16;
+
+
+
+

Keypoints

+
    +
  • Steps to configure
      +
    • First time download and decompress UPPMAX configure file.
    • +
    • run configCluster on local MATLAB and set user name
    • +
    +
  • +
  • Steps to run
      +
    • set parcluster settings, like you do otherwise.
    • +
    +
  • +
  • Note: only parcluster will work, not parpool.
  • +
+
\ No newline at end of file
diff --git a/software/metontiime/index.html b/software/metontiime/index.html
new file mode 100644
index 000000000..a7970e2e8
--- /dev/null
+++ b/software/metontiime/index.html
@@ -0,0 +1,3199 @@

MetONTIIME
\ No newline at end of file
diff --git a/software/mobaxterm/index.html b/software/mobaxterm/index.html
new file mode 100644
index 000000000..48d8603ba
--- /dev/null
+++ b/software/mobaxterm/index.html
@@ -0,0 +1,3112 @@

MobaXterm

+

There are multiple SSH clients. +This page describes the MobaXterm SSH client.

+

MobaXterm is an SSH client that is easy to use and install for Windows. +When MobaXterm is started, start a terminal to run ssh. +The usage of ssh is described at the UPPMAX page on ssh here.

+

In MobaXterm you can use the internal MobAgent and/or Pageant +from the PuTTY tools.

+

MobaXterm (screenshot)

\ No newline at end of file
diff --git a/software/multiqc/index.html b/software/multiqc/index.html
new file mode 100644
index 000000000..2f958b11d
--- /dev/null
+++ b/software/multiqc/index.html
@@ -0,0 +1,3214 @@

MultiQC

+

MultiQC is a tool with homepage https://github.com/ewels/MultiQC.

+

MultiQC can be found among the UPPMAX modules.

+
module spider MultiQC
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham2 ~]$ module spider MultiQC
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  MultiQC:
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+     Versions:
+        MultiQC/0.6
+        MultiQC/0.7
+        MultiQC/0.8
+        MultiQC/0.9
+        MultiQC/1.0
+        MultiQC/1.2
+        MultiQC/1.3
+        MultiQC/1.5
+        MultiQC/1.6
+        MultiQC/1.7
+        MultiQC/1.8
+        MultiQC/1.9
+        MultiQC/1.10
+        MultiQC/1.10.1
+        MultiQC/1.11
+        MultiQC/1.12
+        MultiQC/1.22.2
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  For detailed information about a specific "MultiQC" package (including how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modules.
+  For example:
+
+     $ module spider MultiQC/1.22.2
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+
+

To find out how to load a specific version:

+
module spider MultiQC/1.22.2
+
+
+What does that look like? +

Output will look similar to:

+
[sven@rackham2 ~]$ module spider MultiQC/1.22.2
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  MultiQC: MultiQC/1.22.2
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+    You will need to load all module(s) on any one of the lines below before the "MultiQC/1.22.2" module is available to load.
+
+      bioinfo-tools
+
+    Help:
+       MultiQC - use MultiQC 1.22.2
+
+       Version 1.22.2
+
+
+      Version 1.22.2 is installed using python/3.8.7
+
+
+

After reading that documentation, we know how to load it:

+
module load bioinfo-tools 
+module load MultiQC/1.22.2
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham2 ~]$ module load bioinfo-tools 
+[sven@rackham2 ~]$ module load MultiQC/1.22.2
+[sven@rackham2 ~]$ 
+
+
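
Once the module is loaded, a typical invocation is to point MultiQC at a directory that contains logs and reports (here the current directory):

multiqc .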
+

Singularity script

+

If you want to put MultiQC in a Singularity container, +here is an example script:

+
BootStrap: library
+From: ubuntu:18.04
+
+%runscript
+  multiqc "$@"
+
+%post
+  echo "Hello from inside the container"
+  apt-get update
+  apt-get -y dist-upgrade
+  apt-get clean
+  apt-get -y install python-pip
+  pip install multiqc
+
+

See the documentation on Singularity +for how to do so.

\ No newline at end of file
diff --git a/software/nano/index.html b/software/nano/index.html
new file mode 100644
index 000000000..997b8fbf3
--- /dev/null
+++ b/software/nano/index.html
@@ -0,0 +1,3163 @@

nano

+

nano in action

+
+

Using nano to edit the file my_file.txt.

+
+

UPPMAX has multiple text editors available. +This page describes the GNU nano text editor.

+
+Want to see a video? +

You can find a video on using nano on Rackham here

+
+

GNU nano is a simple terminal text editor +that is easy to learn.

+

Starting nano

+

Start nano on a terminal with:

+
nano
+
+

To start nano to edit a file (for example, my_file.txt), use:

+
nano my_file.txt
+
+

Using nano

+

The keyboard shortcuts are shown on-screen, +where ^ denotes Ctrl and M the meta key.

+

OS specifics:

+
    +
  • On Windows, Alt is the meta key
  • +
  • On Mac: in the Terminal.app, go to 'Preferences -> Settings -> Keyboard' + and turn on "Use option as meta key", after which Alt is the meta key
  • +
+

Common tasks:

+
    +
  • Save a file: CTRL + O , then edit the filename and press enter
  • +
  • Exit: CTRL + X, press "y" or "n" on some questions + and/or press Enter to confirm.
  • +
  • Help: CTRL + G
  • +
+

More tips can be found at the nano cheat sheet.

\ No newline at end of file
diff --git a/software/nextflow/index.html b/software/nextflow/index.html
new file mode 100644
index 000000000..963642e73
--- /dev/null
+++ b/software/nextflow/index.html
@@ -0,0 +1,3389 @@

nextflow & nf-core on UPPMAX

+

https://www.nextflow.io

+ +

nextflow from the module system

+
    +
  • latest nextflow
  • +
+
module load bioinfo-tools
+module load Nextflow/latest  # this also loads java as a requirement
+
+nextflow -v
+nextflow version 24.04.4.5917
+
+
    +
  • alternative versions
  • +
+
export NXF_VER=23.10.1
+
+nextflow -v
+nextflow version 23.10.1.5891
+
+
# To check the available versions on Rackham and Bianca
+ls /sw/bioinfo/Nextflow/latest/rackham/nxf_home/framework/
+20.04.1  20.10.0  21.10.6  22.10.1  22.10.3  22.10.8  23.04.2  23.04.4  23.10.1  24.04.2  24.04.4
+20.07.1  21.04.3  22.10.0  22.10.2  22.10.4  23.04.1  23.04.3  23.10.0  24.04.1  24.04.3
+
+
+

nf-core from the module system

+

https://nf-co.re

+
+

nf-core and all other required modules are available on the transit server as well.

+
+
module load bioinfo-tools
+module load nf-core   # this also loads Nextflow and java as requirements
+
+
+

nf-core pipelines on Bianca

+ + + + + +
    +
  1. Login to transit.uppmax.uu.se - documentation
  2. +
  3. +

    Mount the wharf of your project.

    +
    user@transit:~$ mount_wharf sens2023531
    +Mounting wharf (accessible for you only) to /home/<user>/sens2023531
    +<user>-sens2023531@bianca-sftp.uppmax.uu.se's password: 
    +
    +
  4. +
  5. +

    Navigate to your wharf folder

    +
  6. +
  7. +

    Disable Singularity cache

    +
    export SINGULARITY_DISABLE_CACHE=true
    +export APPTAINER_DISABLE_CACHE=true
    +unset NXF_SINGULARITY_CACHEDIR
    +
    +
  8. +
  9. +

    Load nf-core software module

    +
    module load uppmax bioinfo-tools nf-core
    +
    +
  10. +
  11. +

    Run nf-core to download the pipeline.

    +
    nf-core download pixelator
    +                                      ,--./,-.
    +      ___     __   __   __   ___     /,-._.--~\
    +|\ | |__  __ /  ` /  \ |__) |__         }  {
    +| \| |       \__, \__/ |  \ |___     \`-._,-`-,
    +                                      `._,._,'
    +
    +nf-core/tools version 2.11.1 - https://nf-co.re
    +
    +WARNING  Could not find GitHub authentication token. Some API requests may fail.                                                    
    +? Select release / branch: 1.0.2  [release]
    +? Include the nf-core's default institutional configuration files into the download? Yes
    +
    +In addition to the pipeline code, this tool can download software containers.
    +? Download software container images: singularity
    +
    +Nextflow and nf-core can use an environment variable called $NXF_SINGULARITY_CACHEDIR that is a path to a directory where remote 
    +Singularity images are stored. This allows downloaded images to be cached in a central location.
    +? Define $NXF_SINGULARITY_CACHEDIR for a shared Singularity image download folder? [y/n]: n
    +
    +If transferring the downloaded files to another system, it can be convenient to have everything compressed in a single file.
    +This is not recommended when downloading Singularity images, as it can take a long time and saves very little space.
    +? Choose compression type: none
    +INFO     Saving 'nf-core/pixelator'                                                                                                 
    +          Pipeline revision: '1.0.2'                                                                                                
    +          Use containers: 'singularity'                                                                                             
    +          Container library: 'quay.io'                                                                                              
    +          Output directory: 'nf-core-pixelator_1.0.2'                                                                               
    +          Include default institutional configuration: 'True'                                                                       
    +INFO     Downloading centralised configs from GitHub                                                                                
    +INFO     Downloading workflow files from GitHub                                                                                     
    +INFO     Processing workflow revision 1.0.2, found 4 container images in     total.
    +Downloading singularity images ???????????????????????????????????????????????????????????????????????????????? 100% ? 4/4 completed
    +
    +
  12. +
  13. +

    Running on Bianca

    +
    module load bioinfo-tools Nextflow
    +nextflow run ... -profile uppmax --project sens-XXXX-XX .... 
    +
    +
  14. +
+ + +

Note: you might need -c configs/conf/uppmax.config; make sure you have the file (there is an option to download it during the pipeline download process). +https://github.com/nf-core/configs/blob/master/conf/uppmax.config +https://nf-co.re/configs/uppmax
+https://github.com/nf-core/configs/blob/master/conf/uppmax.config
+https://nf-co.re/configs/uppmax

+

Common problems

+
    +
  • Task is running out of resources (memory or time)
  • +
+

Add lines to your configuration that override the settings for the problematic task, for example:

+
process {
+    withName: 'NFCORE_RNASEQ:RNASEQ:ALIGN_STAR:STAR_ALIGN' {
+        cpus   = 12
+        memory = '72.GB'
+        time   = '24.h'
+    }
+}
+
+
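
Save the override in a file (custom.config is just a name picked here) and pass it with -c when launching the pipeline, alongside the usual profile options shown above:

nextflow run nf-core/rnaseq -profile uppmax --project <project> -c custom.config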

More: https://www.nextflow.io/docs/latest/config.html#process-selectors

+

Troubleshooting - nf-core

\ No newline at end of file
diff --git a/software/nvidia-deep-learning-frameworks/index.html b/software/nvidia-deep-learning-frameworks/index.html
new file mode 100644
index 000000000..4f306baf4
--- /dev/null
+++ b/software/nvidia-deep-learning-frameworks/index.html
@@ -0,0 +1,3201 @@

NVIDIA Deep Learning Frameworks

+

Here is how easily one can use an NVIDIA environment for deep learning, with all the following tools preinstalled. A screenshot of that page is shown below.

+

web screenshot

+

First - pull the container (6.5GB).

+
singularity pull docker://nvcr.io/nvidia/pytorch:22.03-py3
+
+

Get an interactive shell.

+
singularity shell --nv ~/external_1TB/tmp/pytorch_22.03-py3.sif
+
+Singularity> python3
+Python 3.8.12 | packaged by conda-forge | (default, Jan 30 2022, 23:42:07)
+[GCC 9.4.0] on linux
+Type "help", "copyright", "credits" or "license" for more information.
+
+>>> import torch
+# Check torch version
+>>> print(torch.__version__)
+1.12.0a0+2c916ef
+
+# Check if CUDA is available
+>>> print(torch.cuda.is_available())
+True
+
+# Check which GPU architectures are supported
+>>> print(torch.cuda.get_arch_list())
+['sm_52', 'sm_60', 'sm_61', 'sm_70', 'sm_75', 'sm_80', 'sm_86', 'compute_86']
+
+# test torch
+>>> torch.zeros(1).to('cuda')
+tensor([0.], device='cuda:0')
+
+

From the container shell, check what else is available...

+
Singularity> nvcc -V
+nvcc: NVIDIA (R) Cuda compiler driver
+Copyright (c) 2005-2022 NVIDIA Corporation
+Built on Thu_Feb_10_18:23:41_PST_2022
+Cuda compilation tools, release 11.6, V11.6.112
+Build cuda_11.6.r11.6/compiler.30978841_0
+
+# Check what conda packages are already there
+Singularity> conda list -v
+
+# Start a jupyter-lab (keep in mind the hostname)
+Singularity> jupyter-lab
+...
+[I 13:35:46.270 LabApp] [jupyter_nbextensions_configurator] enabled 0.4.1
+[I 13:35:46.611 LabApp] jupyter_tensorboard extension loaded.
+[I 13:35:46.615 LabApp] JupyterLab extension loaded from /opt/conda/lib/python3.8/site-packages/jupyterlab
+[I 13:35:46.615 LabApp] JupyterLab application directory is /opt/conda/share/jupyter/lab
+[I 13:35:46.616 LabApp] [Jupytext Server Extension] NotebookApp.contents_manager_class is (a subclass of) jupytext.TextFileContentsManager already - OK
+[I 13:35:46.616 LabApp] Serving notebooks from local directory: /home/pmitev
+[I 13:35:46.616 LabApp] Jupyter Notebook 6.4.8 is running at:
+[I 13:35:46.616 LabApp] http://hostname:8888/?token=d6e865a937e527ff5bbccfb3f150480b76566f47eb3808b1
+[I 13:35:46.616 LabApp] Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).
+...
+
+

You can use this container as a base to add more packages:

+
Bootstrap: docker
+From: nvcr.io/nvidia/pytorch:22.03-py3
+...
+
+
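
A minimal complete definition file along these lines might look as follows (the extra pip packages are only illustrative):

Bootstrap: docker
From: nvcr.io/nvidia/pytorch:22.03-py3

%post
    # add extra Python packages on top of the NGC image
    pip install --no-cache-dir torchmetrics einops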

Just keep in mind that "upgrading" the built-in torch package might install a package that is compatible with fewer GPU architectures, and it might not work anymore on your hardware.

+
Singularity> python3 -c "import torch; print(torch.__version__); print(torch.cuda.is_available()); print(torch.cuda.get_arch_list()); torch.zeros(1).to('cuda')"
+
+1.10.0+cu102
+True
+['sm_37', 'sm_50', 'sm_60', 'sm_70']
+NVIDIA A100-PCIE-40GB with CUDA capability sm_80 is not compatible with the current PyTorch installation.
+The current PyTorch install supports CUDA capabilities sm_37 sm_50 sm_60 sm_70.
+
\ No newline at end of file
diff --git a/software/openmolcas/index.html b/software/openmolcas/index.html
new file mode 100644
index 000000000..297fa4996
--- /dev/null
+++ b/software/openmolcas/index.html
@@ -0,0 +1,3370 @@

MOLCAS user guide

+

How to run the program MOLCAS on UPPMAX

+

Information

+

MOLCAS is an ab initio computational chemistry program. Focus in the program is placed on methods for calculating general electronic structures in molecular systems in both ground and excited states. MOLCAS is, in particular, designed to study the potential surfaces of excited states.

+

This guide will help you get started running MOLCAS on UPPMAX. More detailed information on how to use Molcas can be found on the official website.

+

Licensing

+

A valid license key is required to run Molcas on UPPMAX. The licence key should be kept in a directory named .Molcas under the home directory.

+

Molcas is currently free of charge for academic researchers active in the Nordic countries. You can get hold of a license by following these instructions.

+

Versions installed at UPPMAX

+

At UPPMAX the following versions are installed:

+
    +
  • 8.0 (serial)
  • +
  • 7.8 (serial)
  • +
+

Modules needed to run MOLCAS

+

In order to run MOLCAS you must first load the molcas +module. You can see all available versions of MOLCAS installed at UPPMAX with:

+
module avail molcas
+
+

Load a MOLCAS module with, eg:

+
module load molcas/7.8.082
+
+

How to run MOLCAS interactively

+

If you would like to do tests or short runs, we recommend using the interactive command:

+
interactive -A your_project_name
+
+

This will reserve a node for you to do your test on. Note that you must provide the name of an active project in order to run on UPPMAX resources. After a short wait you will get access to the node. Then you can run MOLCAS by:

+
module load molcas/7.8.082
+molcas -f test000.input
+
+

The test000.input looks like:

+
*$Revision: 7.7 $
+************************************************************************
+* Molecule: H2
+* Basis: DZ
+* Symmetry: x y z
+* SCF: conventional
+*
+*  This is a test to be run during first run to verify
+*  that seward and scf works at all
+*
+
+>export MOLCAS_PRINT=VERBOSE
+ &GATEWAY
+coord
+2
+angstrom
+H  0.350000000  0.000000000  0.000000000
+H -0.350000000  0.000000000  0.000000000
+basis
+H.DZ....
+
+ &SEWARD
+
+ &SCF
+Title
+ H2, DZ Basis set
+
+ &RASSCF
+Title
+ H2, DZ Basis set
+nActEl
+ 2  0 0
+Ras2
+ 1 1 0 0 0 0 0 0
+
+ &ALASKA
+
+ &SLAPAF
+
+ &CASPT2
+
+

See the Slurm user guide for more information on the interactive command. Don't forget to exit your interactive job when you have finished your calculation. Exiting will free the resource for others to use.

+

Batch scripts for Slurm

+

It's possible to run MOLCAS in the batch queue. Here is an example running MOLCAS on one core:

+
#!/bin/bash -l
+#
+#SBATCH -A your_project_name
+#SBATCH -J molcastest
+#SBATCH -t 00:10:00
+#SBATCH -p core -n 1
+
+module load molcas/7.8.082
+
+#In order to let MOLCAS use more memory
+export MOLCASMEM=2000
+
+molcas -f test000.input
+
+

Again you'll have to provide your project name.

+

If the script is called test000.job you can submit it to the batch queue with:

+
sbatch test000.job
+
\ No newline at end of file
diff --git a/software/orthofinder/index.html b/software/orthofinder/index.html
new file mode 100644
index 000000000..6fbcf407e
--- /dev/null
+++ b/software/orthofinder/index.html
@@ -0,0 +1,3133 @@

OrthoFinder
\ No newline at end of file
diff --git a/software/overview/index.html b/software/overview/index.html
new file mode 100644
index 000000000..80e58ca43
--- /dev/null
+++ b/software/overview/index.html
@@ -0,0 +1,3275 @@

Software

+

At the UPPMAX clusters, +a lot of software is pre-installed +and accessible via the module system.

+
+What are the UPPMAX clusters? +

See the UPPMAX documentation on its clusters here

+
+
+What is the module system? +

See the UPPMAX documentation on modules here

+
+

Software table

+

Automatically updated software table

+

Conflicting modules

+ +

Reach the Bioinformatics tools

+
    +
  • Before you can list available bioinformatics tools you need to issue the command:
  • +
+
module load bioinfo-tools
+
+
    +
  • +

    When you list available modules with module avail after this, you will see that the bioinformatics tools are now also available in the listing.

    +
  • +
  • +

    Note that the module spider command will show bioinformatics modules regardless of whether you have loaded the bioinfo-tools module.

    +
  • +
  • This command can also tell you whether a particular module requires the bioinfo-tools module, e.g. "module spider GEMINI/0.18.3" (see the combined example after this list).
  • +
+
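
Putting these commands together (GEMINI/0.18.3 is the example module mentioned above):

module load bioinfo-tools
module avail                   # the bioinformatics tools now appear in the listing
module spider GEMINI/0.18.3    # module spider works even without bioinfo-tools loaded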

How can I request new software to be installed?

+

You can always install software in your home on any UPPMAX system. If there are many users who would like to request the same software, it can be installed by UPPMAX application or system experts.

+

Please send such requests to support@uppmax.uu.se.

+

Installing yourself

+

Go to our installation page

\ No newline at end of file
diff --git a/software/parallel_comb/index.html b/software/parallel_comb/index.html
new file mode 100644
index 000000000..93f663a60
--- /dev/null
+++ b/software/parallel_comb/index.html
@@ -0,0 +1,3724 @@

Combinations of parallel libraries and compilers

+

Before compiling a program for MPI we must choose, in addition to the compiler, which version of MPI we want to use. At UPPMAX there are two, openmpi and intelmpi. These, with their versions, are compatible with only a subset of the gcc and intel compiler versions. The lists below summarise the best choices.

+

Suggestions for compatibility Rackham, Snowy, Bianca

+

GCC

+
    +
  • v5: gcc/5.3.0 openmpi/1.10.3
  • +
  • v6: gcc/6.3.0 openmpi/2.1.0
  • +
  • v7: gcc/7.4.0 openmpi/3.1.3
  • +
  • v8: gcc/8.3.0 openmpi/3.1.3
  • +
  • v9: gcc/9.3.0 openmpi/3.1.5
  • +
  • v10: gcc/10.3.0 openmpi/3.1.6 or openmpi/4.1.0
  • +
  • v11: gcc/11.2.0 openmpi/4.1.1 will work also on Miarka
  • +
  • v12: gcc/12.2.0 openmpi/4.1.4
  • +
  • v13: gcc/13.2.0 openmpi/4.1.5
  • +
+

Intel

+
    +
  • v18: intel/18.3 openmpi/3.1.3
  • +
  • v20: intel/20.4 openmpi/3.1.6 or openmpi/4.0.4
  • +
+

Intel & intelmpi

+
    +
  • Load the version of intelmpi corresponding to the intel compiler (versions up to 20.4)
  • +
+

Intel after version 20.4

+
    +
  • For all versions of intel from 2021 onwards there is not necessarily an MPI library with the same version as the compiler.
  • +
+
module load intel-oneapi
+
+
    +
  • Check availability and load desired version
  • +
+
module avail mpi  # showing both compilers and mpi ;-)
+
+
    +
  • Example:
  • +
+
module load compiler/2023.1.0 mpi/2021.9.0    
+
+
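
As a sketch of the full compile-and-run workflow (module versions taken from the GCC list above; hello.c stands in for your own source file):

module load gcc/12.2.0 openmpi/4.1.4
mpicc hello.c -o hello_mpi
mpirun -n 4 ./hello_mpi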

Suggestions for compatibility Rackham and Snowy

GCC

  • v4: gcc/4.8.2 openmpi/1.7.4
  • v5: gcc/5.3.0 openmpi/1.10.3
  • v6: gcc/6.3.0 openmpi/2.1.0
  • v7: gcc/7.4.0 openmpi/3.1.3
  • v8: gcc/8.3.0 openmpi/3.1.3
  • v9: gcc/9.3.0 openmpi/3.1.3 or openmpi/4.0.3
  • v10: gcc/10.3.0 openmpi/3.1.6 or openmpi/4.1.1
  • v11: gcc/11.3.0 openmpi/4.1.2
  • v12: gcc/12.2.0 openmpi/4.1.4
  • v13: gcc/13.1.0 openmpi/4.1.5

Intel

  • v18: intel/18.3 openmpi/3.1.3
  • v20: intel/20.4 openmpi/3.1.6 or openmpi/4.1.1

Rackham

  • Also on Snowy: marked in italics
  • Also on Snowy AND Bianca: marked in bold

GCC | openmpi
4.8.2 | 1.7.4
5.2.0 | 1.8.8
5.3.0 | 1.10.1
5.5.0 | 1.10.3
6.3.0 | 2.0.1, 2.0.2, 2.1.0
6.4.0 | 2.1.1
7.1.0 | 2.1.0, 2.1.1
7.2.0 | 2.1.1, 2.1.2, 3.0.0
7.3.0 | 2.1.3, 3.0.0, 3.1.0
7.4.0 | 3.1.3
8.1.0 | 3.0.1, 3.1.0
8.2.0 | 3.0.2, 3.1.0, 3.1.1, 3.1.2, 3.1.3, 4.0.0
8.3.0 | 3.1.3
8.4.0 | 3.1.5, 4.0.2
9.1.0 | 3.1.3
9.2.0 | 3.1.3, 3.1.4, 3.1.5, 4.0.2
9.3.0 | 3.1.5, 4.0.2, 4.0.3
10.1.0 | 3.1.6, 4.0.3
10.2.0 | 3.1.6, 4.0.4, 4.1.0
10.3.0 | 3.1.6, 4.0.5, 4.1.0, 4.1.1
11.2.0 | 4.1.1, 4.1.2
11.3.0 | 4.1.2, 4.1.3
12.1.0 | 4.1.3
12.2.0 | 4.1.3, 4.1.4
12.3.0 | 4.1.5
13.1.0 | 4.1.5

Intel | openmpi
15.3 | 1.10.0, 1.10.1, 2.1.0
16.1 | 1.10.1, 1.10.2
17.1 | 2.0.1, 2.0.2
17.2 | 2.0.2, 2.1.0
17.4 | 2.1.1, 3.0.0
18.0 | 3.0.0
18.1 | 2.1.2, 2.1.3, 3.0.0
18.2 | 2.1.3, 3.0.0, 3.1.0
18.3 | 3.0.2, 3.1.0, 3.1.1, 3.1.2, 3.1.3
19.4 | 3.1.4
19.5 | 3.1.4
20.0 | 3.1.5, 3.1.6, 4.0.3, 4.0.4
20.2 | 3.1.6, 4.0.4
20.4 | 3.1.6, 4.0.4, 4.1.0, 4.1.1

openmpi | gcc | intel | pgi
1.7.4 | 4.8.2 | - | -
1.8.8 | 5.2.0 | - | -
1.10.0 | - | 15.3 | -
1.10.1 | 5.3.0 | 15.3, 16.1 | -
1.10.2 | - | 16.1 | 16.9, 17.4, 17.7, 17.10
1.10.3 | 5.5.0 | - | -
2.0.1 | 6.3.0 | 17.1 | -
2.0.2 | 6.3.0 | 17.1, 17.2 | -
2.1.0 | 6.3.0, 7.1.0 | 15.3, 17.2 | -
2.1.1 | 6.4.0, 7.1.0, 7.2.0 | 17.4 | 17.4, 17.7
2.1.2 | 7.2.0 | 18.1 | 17.10, 18.1, 18.3
2.1.3 | 7.3.0 | 18.1, 18.2 | 18.1
3.0.0 | 7.2.0, 7.3.0 | 17.4, 18.0, 18.1, 18.2 | 17.7, 17.10, 18.0, 18.1
3.0.1 | 8.1.0 | - | -
3.0.2 | 8.2.0 | 18.3 | -
3.1.0 | 7.3.0, 8.1.0, 8.2.0 | 18.2, 18.3 | 18.3
3.1.1 | 8.2.0 | 18.3 | -
3.1.2 | 8.2.0 | 18.3 | 18.3
3.1.3 | 7.4.0, 8.2.0, 8.3.0, 9.1.0, 9.2.0 | 18.3 | 18.3
3.1.4 | 9.2.0 | 19.4, 19.5 | -
3.1.5 | 8.4.0, 9.2.0, 9.3.0 | 20.0 | -
3.1.6 | 10.1.0, 10.2.0, 10.3.0 | 20.0, 20.2, 20.4 | -
4.0.0 | 8.2.0 | - | -
4.0.2 | 8.4.0, 9.2.0, 9.3.0 | - | -
4.0.3 | 9.3.0, 10.1.0 | 20.0 | -
4.0.4 | 10.2.0 | 20.0, 20.2, 20.4 | -
4.0.5 | 10.3.0 | - | -
4.1.0 | 10.2.0, 10.3.0 | 20.4 | -
4.1.1 | 10.3.0, 11.2.0 | 20.4 | -
4.1.2 | 11.2.0 | - | -
4.1.3 | 12.1.0, 12.2.0 | - | -
4.1.4 | 12.2.0 | - | -
4.1.5 | 12.3.0, 13.1.0 | - | -
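
To double-check that a loaded pair from the tables above actually lines up, the openmpi wrappers can report what they would run; a sketch using one combination from the Rackham table (--showme is the OpenMPI wrapper inspection flag):

module load gcc/10.3.0 openmpi/4.1.1    # a valid pair according to the table above
mpicc --showme                          # print the underlying gcc command line
mpirun --version                        # confirm which Open MPI version is in use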

Bianca

GCC | openmpi
5.3.0 | 1.10.1
5.4.0 | 2.0.0, 2.0.1
6.1.0 | 2.0.0, 2.0.1
6.2.0 | 2.0.1
6.3.0 | 2.0.1, 2.0.2, 2.1.0
6.4.0 | 2.1.1
7.1.0 | 2.1.0, 2.1.1
7.2.0 | 2.1.1, 3.0.0
7.3.0 | 3.0.0
8.1.0 | 3.1.0
8.2.0 | 3.1.2, 3.1.3
8.3.0 | 3.1.3
9.3.0 | 3.1.5
10.1.0 | 3.1.6
10.2.0 | 4.1.0
10.3.0 | 3.1.6, 4.0.5, 4.1.0
11.2.0 | 4.1.1

Intel | openmpi
15.3 | 1.10.0, 1.10.1
16.1 | 1.10.1, 1.10.2
16.3 | 2.0.0, 2.0.1
17.0 | 2.0.1
17.1 | 2.0.1, 2.0.2
17.2 | 2.0.2, 2.1.0
17.4 | 2.1.1, 3.0.0
18.3 | 3.1.2, 3.1.3
20.2 | 3.1.6, 4.0.4
20.4 | 3.1.6, 4.0.4

diff --git a/software/perl/index.html b/software/perl/index.html

Perl_modules guide


A number of modules/packages are available by default with all Perl versions.


This is a list of the modules for perl/5.26.2 that become available by loading the module perl_modules/5.26.2.


For previous Perl versions 5.18.4 and 5.24.1 (available through the software module system as perl/5.18.4 and perl/5.24.1), many more Perl modules are available by loading the software module perl_modules/5.18.4 or perl_modules/5.24.1.

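To use any of these, load a matching perl and perl_modules pair and import as usual. A quick sketch of a sanity check (Capture::Tiny is one of the modules in the table below, listed at version 0.48):

module load perl/5.26.2 perl_modules/5.26.2
perl -MCapture::Tiny -e 'print "$Capture::Tiny::VERSION\n"'    # should print 0.48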

A complete list of the Perl modules available in the perl_modules/5.26.2 module is as follows:


Perl Module Search on perl_modules/5.26.2/rackham

Module name | Version
$pkg | 2.019
Acme::Damn | 0.08
Algorithm::C3 | 0.10
Algorithm::Combinatorics | 0.27
Algorithm::Diff | 1.1903
Algorithm::FastPermute | 0.999
Algorithm::Loops | 1.032
Algorithm::Munkres | 0.08
Algorithm::Permute | 0.16
aliased | 0.34
Apache::Htpasswd | 1.9
Apache::LogFormat::Compiler | 0.35
Apache::SOAP | 1.27
App::Ack | 2.24
App::Ack::ConfigDefault | unknown
App::Ack::ConfigFinder | unknown
App::Ack::ConfigLoader | unknown
App::Ack::Filter | unknown
App::Ack::Filter::Collection | unknown
App::Ack::Filter::Default | unknown
App::Ack::Filter::Extension | unknown
App::Ack::Filter::ExtensionGroup | unknown
App::Ack::Filter::FirstLineMatch | unknown
App::Ack::Filter::Inverse | unknown
App::Ack::Filter::Is | unknown
App::Ack::Filter::IsGroup | unknown
App::Ack::Filter::IsPath | unknown
App::Ack::Filter::IsPathGroup | unknown
App::Ack::Filter::Match | unknown
App::Ack::Filter::MatchGroup | unknown
App::Ack::Resource | unknown
App::Ack::Resources | unknown
App::Cmd | 0.331
App::Cmd::ArgProcessor | 0.331
App::Cmd::Command | 0.331
App::Cmd::Command::commands | 0.331
App::Cmd::Command::help | 0.331
App::Cmd::Command::version | 0.331
App::Cmd::Plugin | 0.331
App::Cmd::Setup | 0.331
App::Cmd::Simple | 0.331
App::Cmd::Subdispatch | 0.331
App::Cmd::Subdispatch::DashedStyle | 0.331
App::Cmd::Tester | 0.331
App::Cmd::Tester::CaptureExternal | 0.331
App::cpanminus | 1.7044
App::cpanminus::fatscript | 1.7044
App::FatPacker | 0.010007
App::FatPacker::Trace | unknown
App::Nopaste | 1.012
App::Nopaste::Command | 1.012
App::Nopaste::Service | 1.012
App::Nopaste::Service::Codepeek | 1.012
App::Nopaste::Service::Debian | 1.012
App::Nopaste::Service::Gist | 1.012
App::Nopaste::Service::GitLab | 1.012
App::Nopaste::Service::Mojopaste | 1.012
App::Nopaste::Service::PastebinCom | 1.012
App::Nopaste::Service::Pastie | 1.012
App::Nopaste::Service::Shadowcat | 1.012
App::Nopaste::Service::Snitch | 1.012
App::Nopaste::Service::ssh | 1.012
App::Nopaste::Service::Ubuntu | 1.012
App::perlbrew | 0.84
App::Pinto | 0.14
App::Pinto::Command | 0.14
App::Pinto::Command::add | 0.14
App::Pinto::Command::clean | 0.14
App::Pinto::Command::copy | 0.14
App::Pinto::Command::default | 0.14
App::Pinto::Command::delete | 0.14
App::Pinto::Command::diff | 0.14
App::Pinto::Command::help | 0.14
App::Pinto::Command::init | 0.14
App::Pinto::Command::install | 0.14
App::Pinto::Command::kill | 0.14
App::Pinto::Command::list | 0.14
App::Pinto::Command::lock | 0.14
App::Pinto::Command::log | 0.14
App::Pinto::Command::look | 0.14
App::Pinto::Command::manual | 0.14
App::Pinto::Command::merge | 0.14
App::Pinto::Command::migrate | 0.14
App::Pinto::Command::new | 0.14
App::Pinto::Command::nop | 0.14
App::Pinto::Command::pin | 0.14
App::Pinto::Command::props | 0.14
App::Pinto::Command::pull | 0.14
App::Pinto::Command::register | 0.14
App::Pinto::Command::rename | 0.14
App::Pinto::Command::reset | 0.14
App::Pinto::Command::revert | 0.14
App::Pinto::Command::roots | 0.14
App::Pinto::Command::stacks | 0.14
App::Pinto::Command::statistics | 0.14
App::Pinto::Command::thanks | 0.14
App::Pinto::Command::unlock | 0.14
App::Pinto::Command::unpin | 0.14
App::Pinto::Command::unregister | 0.14
App::Pinto::Command::update | 0.14
App::Pinto::Command::verify | 0.14
App::Prove | 3.42
App::Prove::State | 3.42
App::Prove::State::Result | 3.42
App::Prove::State::Result::Test | 3.42
AppConfig | 1.71
AppConfig::Args | 1.71
AppConfig::CGI | 1.71
AppConfig::File | 1.71
AppConfig::Getopt | 1.71
AppConfig::State | 1.71
AppConfig::Sys | 1.71
Archive::Any::Create | 0.03
Archive::Any::Create::Tar | unknown
Archive::Any::Create::Zip | unknown
Archive::Extract | 0.80
Archive::Zip | 1.60
Archive::Zip::Archive | 1.60
Archive::Zip::BufferedFileHandle | 1.60
Archive::Zip::DirectoryMember | 1.60
Archive::Zip::FileMember | 1.60
Archive::Zip::Member | 1.60
Archive::Zip::MemberRead | 1.60
Archive::Zip::MockFileHandle | 1.60
Archive::Zip::NewFileMember | 1.60
Archive::Zip::StringMember | 1.60
Archive::Zip::Tree | 1.60
Archive::Zip::ZipFileMember | 1.60
Array::Compare | 3.0.1
Array::Unique | 0.08
Array::Utils | 0.5
asa | 1.03
Astro::FITS::Header | 3.04
Astro::FITS::Header::AST | 3.01
Astro::FITS::Header::CFITSIO | 3.02
Astro::FITS::Header::GSD | 3.01
Astro::FITS::Header::Item | 3.02
Astro::FITS::Header::NDF | 3.02
Authen::SASL | 2.16
Authen::SASL::CRAM_MD5 | 2.14
Authen::SASL::EXTERNAL | 2.14
Authen::SASL::Perl | 2.14
Authen::SASL::Perl::ANONYMOUS | 2.14
Authen::SASL::Perl::CRAM_MD5 | 2.14
Authen::SASL::Perl::DIGEST_MD5 | 2.14
Authen::SASL::Perl::EXTERNAL | 2.14
Authen::SASL::Perl::GSSAPI | 0.05
Authen::SASL::Perl::LOGIN | 2.14
Authen::SASL::Perl::PLAIN | 2.14
Authen::Simple | 0.5
Authen::Simple::Adapter | unknown
Authen::Simple::Apache | unknown
Authen::Simple::Log | unknown
Authen::Simple::Passwd | 0.6
Authen::Simple::Password | unknown
autobox | unknown
autobox::universal | unknown
B::Hooks::EndOfScope | 0.24
B::Hooks::EndOfScope::PP | 0.24
B::Hooks::EndOfScope::XS | 0.24
B::Hooks::OP::Check | 0.22
B::Hooks::OP::Check::Install::Files | unknown
B::Hooks::OP::PPAddr | 0.06
B::Hooks::OP::PPAddr::Install::Files | unknown
B::Keywords | 1.18
B::Utils | 0.27
B::Utils::Install::Files | unknown
B::Utils::OP | 0.27
bareword::filehandles | 0.006
Bit::Vector | 7.4
Bit::Vector::Overload | 7.4
Bit::Vector::String | 7.4
boolean | 0.46
Browser::Open | 0.04
Bundle::DBD::mysql | 4.046
Bundle::DBI | 12.008696
Bundle::Object::InsideOut | 4.04
C::StructType | unknown
C::Type | unknown
C::Var | unknown
Cache::BaseCache | unknown
Cache::BaseCacheTester | unknown
Cache::Cache | 1.08
Cache::CacheMetaData | unknown
Cache::CacheSizer | unknown
Cache::CacheTester | unknown
Cache::CacheUtils | unknown
Cache::FileBackend | unknown
Cache::FileCache | unknown
Cache::LRU | 0.04
Cache::MemoryBackend | unknown
Cache::MemoryCache | unknown
Cache::NullCache | unknown
Cache::Object | unknown
Cache::SharedMemoryBackend | unknown
Cache::SharedMemoryCache | unknown
Cache::SizeAwareCache | unknown
Cache::SizeAwareCacheTester | unknown
Cache::SizeAwareFileCache | unknown
Cache::SizeAwareMemoryCache | unknown
Cache::SizeAwareSharedMemoryCache | unknown
Capture::Tiny | 0.48
Carp::Always | 0.13
Carp::Assert | 0.21
Carp::Assert::More | 1.16
Carp::Clan | 6.06
Carp::REPL | 0.18
Carton | unknown
Carton::Builder | unknown
Carton::CLI | unknown
Carton::CPANfile | unknown
Carton::Dependency | unknown
Carton::Dist | unknown
Carton::Dist::Core | unknown
Carton::Environment | unknown
Carton::Error | unknown
Carton::Index | unknown
Carton::Mirror | unknown
Carton::Package | unknown
Carton::Packer | unknown
Carton::Snapshot | unknown
Carton::Snapshot::Emitter | unknown
Carton::Snapshot::Parser | unknown
Carton::Tree | unknown
Carton::Util | unknown
Catalyst | 5.90118
Catalyst::Action | unknown
Catalyst::Action::Deserialize | 1.21
Catalyst::Action::Deserialize::Callback | 1.21
Catalyst::Action::Deserialize::JSON | 1.21
Catalyst::Action::Deserialize::JSON::XS | 1.21
Catalyst::Action::Deserialize::View | 1.21
Catalyst::Action::Deserialize::XML::Simple | 1.21
Catalyst::Action::Deserialize::YAML | 1.21
Catalyst::Action::DeserializeMultiPart | 1.21
Catalyst::Action::RenderView | 0.16
Catalyst::Action::REST | 1.21
Catalyst::Action::REST::ForBrowsers | 1.21
Catalyst::Action::Role::ACL | 0.07
Catalyst::Action::Serialize | 1.21
Catalyst::Action::Serialize::Callback | 1.21
Catalyst::Action::Serialize::JSON | 1.21
Catalyst::Action::Serialize::JSON::XS | 1.21
Catalyst::Action::Serialize::JSONP | 1.21
Catalyst::Action::Serialize::View | 1.21
Catalyst::Action::Serialize::XML::Simple | 1.21
Catalyst::Action::Serialize::YAML | 1.21
Catalyst::Action::Serialize::YAML::HTML | 1.21
Catalyst::Action::SerializeBase | 1.21
Catalyst::ActionChain | unknown
Catalyst::ActionContainer | unknown
Catalyst::ActionRole::ACL | 0.07
Catalyst::ActionRole::ConsumesContent | unknown
Catalyst::ActionRole::HTTPMethods | unknown
Catalyst::ActionRole::NeedsLogin | unknown
Catalyst::ActionRole::QueryMatching | unknown
Catalyst::ActionRole::Scheme | unknown
Catalyst::Authentication::Credential::HTTP | 1.018
Catalyst::Authentication::Credential::NoPassword | unknown
Catalyst::Authentication::Credential::Password | unknown
Catalyst::Authentication::Credential::Remote | unknown
Catalyst::Authentication::Realm | unknown
Catalyst::Authentication::Realm::Compatibility | unknown
Catalyst::Authentication::Realm::Progressive | unknown
Catalyst::Authentication::Realm::SimpleDBunknown
Catalyst::Authentication::Store::DBIx::Class0.1506
Catalyst::Authentication::Store::DBIx::Class::Userunknown
Catalyst::Authentication::Store::Minimalunknown
Catalyst::Authentication::Store::Nullunknown
Catalyst::Authentication::Userunknown
Catalyst::Authentication::User::Hashunknown
Catalyst::Baseunknown
Catalyst::ClassDataunknown
Catalyst::Componentunknown
Catalyst::Component::ApplicationAttributeunknown
Catalyst::Component::ContextClosureunknown
Catalyst::Component::InstancePerContext0.001001
Catalyst::Controllerunknown
Catalyst::Controller::ActionRole0.17
Catalyst::Controller::REST1.21
Catalyst::Devel1.39
Catalyst::Dispatcherunknown
Catalyst::DispatchTypeunknown
Catalyst::DispatchType::Chainedunknown
Catalyst::DispatchType::Defaultunknown
Catalyst::DispatchType::Indexunknown
Catalyst::DispatchType::Pathunknown
Catalyst::Engineunknown
Catalyst::EngineLoaderunknown
Catalyst::Exceptionunknown
Catalyst::Exception::Basicunknown
Catalyst::Exception::Detachunknown
Catalyst::Exception::Gounknown
Catalyst::Exception::Interfaceunknown
Catalyst::Helper1.39
Catalyst::Helper::Model::Adaptorunknown
Catalyst::Helper::Model::DBIC::Schema0.65
Catalyst::Helper::Model::Factoryunknown
Catalyst::Helper::Model::Factory::PerRequestunknown
Catalyst::Helper::View::Email0.36
Catalyst::Helper::View::Email::Template0.36
Catalyst::Helper::View::TT0.44
Catalyst::Helper::View::TTSite0.44
Catalyst::Logunknown
Catalyst::Manual5.9009
Catalyst::Middleware::Stashunknown
Catalyst::Modelunknown
Catalyst::Model::Adaptor0.10
Catalyst::Model::Adaptor::Baseunknown
Catalyst::Model::DBIC::Schema0.65
Catalyst::Model::Factory0.10
Catalyst::Model::Factory::PerRequest0.10
Catalyst::Plugin::Authentication0.10023
Catalyst::Plugin::Authentication::Credential::Passwordunknown
Catalyst::Plugin::Authentication::Store::Minimalunknown
Catalyst::Plugin::Authentication::Userunknown
Catalyst::Plugin::Authentication::User::Hashunknown
Catalyst::Plugin::ConfigLoader0.34
Catalyst::Plugin::I18N0.10
Catalyst::Plugin::Session0.40
Catalyst::Plugin::Session::Stateunknown
Catalyst::Plugin::Session::State::Cookie0.17
Catalyst::Plugin::Session::Storeunknown
Catalyst::Plugin::Session::Store::DBIC0.14
Catalyst::Plugin::Session::Store::DBIC::Delegateunknown
Catalyst::Plugin::Session::Store::Delegate0.06
Catalyst::Plugin::Session::Store::Dummyunknown
Catalyst::Plugin::Session::Store::File0.18
Catalyst::Plugin::Session::Test::Store123
Catalyst::Plugin::StackTrace0.12
Catalyst::Plugin::Static::Simple0.36
Catalyst::Plugin::Unicode::Encoding5.90118
Catalyst::Requestunknown
Catalyst::Request::PartDataunknown
Catalyst::Request::REST1.21
Catalyst::Request::REST::ForBrowsers1.21
Catalyst::Request::Uploadunknown
Catalyst::Responseunknown
Catalyst::Response::Writerunknown
Catalyst::Restarterunknown
Catalyst::Restarter::Forkingunknown
Catalyst::Restarter::Win32unknown
Catalyst::Runtime5.90118
Catalyst::Script::CGIunknown
Catalyst::Script::Createunknown
Catalyst::Script::FastCGIunknown
Catalyst::Script::Serverunknown
Catalyst::Script::Testunknown
Catalyst::ScriptRoleunknown
Catalyst::ScriptRunnerunknown
Catalyst::Statsunknown
Catalyst::Testunknown
Catalyst::TraitFor::Model::DBIC::Schema::Cachingunknown
Catalyst::TraitFor::Model::DBIC::Schema::PerRequestSchemaunknown
Catalyst::TraitFor::Model::DBIC::Schema::Replicatedunknown
Catalyst::TraitFor::Model::DBIC::Schema::SchemaProxyunknown
Catalyst::TraitFor::Request::REST1.21
Catalyst::TraitFor::Request::REST::ForBrowsers1.21
Catalyst::Utilsunknown
Catalyst::Viewunknown
Catalyst::View::Email0.36
Catalyst::View::Email::Template0.36
Catalyst::View::TT0.44
CatalystX::Component::Traits0.19
CatalystX::InjectComponent0.025
CatalystX::LeakChecker0.06
CatalystX::Profile0.02
CatalystX::Profile::Controller::ControlProfiling0.02
CatalystX::REPL0.04
CatalystX::SimpleLogin0.20
CatalystX::SimpleLogin::Controller::Loginunknown
CatalystX::SimpleLogin::Form::Loginunknown
CatalystX::SimpleLogin::Form::LoginOpenIDunknown
CatalystX::SimpleLogin::TraitFor::Controller::Login::Logoutunknown
CatalystX::SimpleLogin::TraitFor::Controller::Login::OpenIDunknown
CatalystX::SimpleLogin::TraitFor::Controller::Login::RenderAsTTTemplateunknown
CatalystX::SimpleLogin::TraitFor::Controller::Login::WithRedirectunknown
CGI4.38
CGI::Carp4.38
CGI::Cookie4.38
CGI::File::Temp4.38
CGI::FormBuilder3.10
CGI::FormBuilder::Field3.10
CGI::FormBuilder::Field::button3.10
CGI::FormBuilder::Field::checkbox3.10
CGI::FormBuilder::Field::date3.10
CGI::FormBuilder::Field::datetime3.10
CGI::FormBuilder::Field::datetime_local3.10
CGI::FormBuilder::Field::email3.10
CGI::FormBuilder::Field::file3.10
CGI::FormBuilder::Field::hidden3.10
CGI::FormBuilder::Field::image3.10
CGI::FormBuilder::Field::number3.10
CGI::FormBuilder::Field::password3.10
CGI::FormBuilder::Field::radio3.10
CGI::FormBuilder::Field::select3.10
CGI::FormBuilder::Field::static3.10
CGI::FormBuilder::Field::submit3.10
CGI::FormBuilder::Field::text3.10
CGI::FormBuilder::Field::textarea3.10
CGI::FormBuilder::Field::time3.10
CGI::FormBuilder::Field::url3.10
CGI::FormBuilder::Messages3.10
CGI::FormBuilder::Messages::base3.10
CGI::FormBuilder::Messages::default3.10
CGI::FormBuilder::Messages::locale3.10
CGI::FormBuilder::Multi 3.10
CGI::FormBuilder::Source 3.10
CGI::FormBuilder::Source::File 3.10
CGI::FormBuilder::Source::Perl 0.01
CGI::FormBuilder::Template 3.10
CGI::FormBuilder::Template::Builtin 3.10
CGI::FormBuilder::Template::CGI_SSI 3.10
CGI::FormBuilder::Template::Div 3.10
CGI::FormBuilder::Template::Fast 3.10
CGI::FormBuilder::Template::HTML 3.10
CGI::FormBuilder::Template::Text 3.10
CGI::FormBuilder::Template::TT2 3.10
CGI::FormBuilder::Test 3.10
CGI::FormBuilder::Util 3.10
CGI::HTML::Functions unknown
CGI::Pretty 4.38
CGI::Push 4.38
CGI::Simple 1.15
CGI::Simple::Cookie 1.15
CGI::Simple::Standard 1.15
CGI::Simple::Util 1.15
CGI::Struct 1.21
CGI::Util 4.38
CHI 0.60
CHI::CacheObject 0.60
CHI::Constants 0.60
CHI::Driver 0.60
CHI::Driver::Base::CacheContainer 0.60
CHI::Driver::CacheCache 0.60
CHI::Driver::FastMmap 0.60
CHI::Driver::File 0.60
CHI::Driver::Memory 0.60
CHI::Driver::Metacache 0.60
CHI::Driver::Null 0.60
CHI::Driver::RawMemory 0.60
CHI::Driver::Role::HasSubcaches 0.60
CHI::Driver::Role::IsSizeAware 0.60
CHI::Driver::Role::IsSubcache 0.60
CHI::Driver::Role::Universal 0.60
CHI::Serializer::JSON 0.60
CHI::Serializer::Storable 0.60
CHI::Stats 0.60
CHI::t::Bugs 0.60
CHI::t::Config 0.60
CHI::t::Constants 0.60
CHI::t::Driver 0.60
CHI::t::Driver::CacheCache 0.60
CHI::t::Driver::FastMmap 0.60
CHI::t::Driver::File 0.60
CHI::t::Driver::File::DepthZero 0.60
CHI::t::Driver::Memory 0.60
CHI::t::Driver::NonMoose 0.60
CHI::t::Driver::RawMemory 0.60
CHI::t::Driver::Subcache 0.60
CHI::t::Driver::Subcache::l1_cache 0.60
CHI::t::Driver::Subcache::mirror_cache 0.60
CHI::t::GetError 0.60
CHI::t::Initialize 0.60
CHI::t::Null 0.60
CHI::t::RequiredModules 0.60
CHI::t::Sanity 0.60
CHI::t::SetError 0.60
CHI::t::Subcache 0.60
CHI::t::Subclass 0.60
CHI::t::Util 0.60
CHI::Test 0.60
CHI::Test::Class 0.60
CHI::Test::Driver::NonMoose 0.60
CHI::Test::Driver::Readonly 0.60
CHI::Test::Driver::Role::CheckKeyValidity 0.60
CHI::Test::Driver::Writeonly 0.60
CHI::Test::Util 0.60
CHI::Types 0.60
CHI::Util 0.60
Class::Accessor 0.51
Class::Accessor::Chained 0.01
Class::Accessor::Chained::Fast unknown
Class::Accessor::Fast 0.51
Class::Accessor::Faster 0.51
Class::Accessor::Grouped 0.10014
Class::Accessor::Lite 0.08
Class::AutoClass 1.56
Class::AutoClass::Root 1
Class::C3 0.34
Class::C3::Adopt::NEXT 0.14
Class::C3::Componentised 1.001002
Class::C3::Componentised::ApplyHooks unknown
Class::Data::Inheritable 0.08
Class::Factory::Util 1.7
Class::Inspector 1.32
Class::Inspector::Functions 1.32
Class::Load 0.25
Class::Load::PP 0.25
Class::Load::XS 0.10
Class::Method::Modifiers 2.12
Class::MethodMaker 2.24
Class::MethodMaker::array unknown
Class::MethodMaker::Constants unknown
Class::MethodMaker::Engine 2.24
Class::MethodMaker::hash unknown
Class::MethodMaker::OptExt unknown
Class::MethodMaker::scalar unknown
Class::MethodMaker::V1Compat unknown
Class::MOP 2.2011
Class::MOP::Attribute 2.2011
Class::MOP::Class 2.2011
Class::MOP::Class::Immutable::Trait 2.2011
Class::MOP::Deprecated 2.2011
Class::MOP::Instance 2.2011
Class::MOP::Method 2.2011
Class::MOP::Method::Accessor 2.2011
Class::MOP::Method::Constructor 2.2011
Class::MOP::Method::Generated 2.2011
Class::MOP::Method::Inlined 2.2011
Class::MOP::Method::Meta 2.2011
Class::MOP::Method::Wrapped 2.2011
Class::MOP::MiniTrait 2.2011
Class::MOP::Mixin 2.2011
Class::MOP::Mixin::AttributeCore 2.2011
Class::MOP::Mixin::HasAttributes 2.2011
Class::MOP::Mixin::HasMethods 2.2011
Class::MOP::Mixin::HasOverloads 2.2011
Class::MOP::Module 2.2011
Class::MOP::Object 2.2011
Class::MOP::Overload 2.2011
Class::MOP::Package 2.2011
Class::Singleton 1.5
Class::Tiny 1.006
Class::Trigger 0.14
Class::Unload 0.11
Class::XSAccessor 1.19
Class::XSAccessor::Array 1.19
Clipboard 0.13
Clipboard::MacPasteboard unknown
Clipboard::Win32 unknown
Clipboard::Xclip unknown
Clone 0.39
Clone::Choose 0.010
Clone::PP 1.07
Commandable 0.01
Commandable::Invocation 0.01
common::sense 3.74
Compress::Bzip2 2.26
Compress::Raw::Bzip2 2.081
Compress::Raw::Zlib 2.081
Config::Any 0.32
Config::Any::Base unknown
Config::Any::General unknown
Config::Any::INI unknown
Config::Any::JSON unknown
Config::Any::Perl unknown
Config::Any::XML unknown
Config::Any::YAML unknown
Config::General 2.63
Config::General::Extended 2.07
Config::General::Interpolated 2.15
Config::INI 0.025
Config::INI::Reader 0.025
Config::INI::Writer 0.025
Config::MVP 2.200011
Config::MVP::Assembler 2.200011
Config::MVP::Assembler::WithBundles 2.200011
Config::MVP::Error 2.200011
Config::MVP::Reader 2.200011
Config::MVP::Reader::Findable 2.200011
Config::MVP::Reader::Findable::ByExtension 2.200011
Config::MVP::Reader::Finder 2.200011
Config::MVP::Reader::Hash 2.200011
Config::MVP::Reader::INI 2.101463
Config::MVP::Section 2.200011
Config::MVP::Sequence 2.200011
Config::Tiny 2.23
constant::boolean 0.02
Context::Preserve 0.03
Contextual::Return 0.004014
Contextual::Return::Failure unknown
Convert::Binary::C 0.78
Convert::Binary::C::Cached 0.78
Convert::BinHex 1.125
Convert::Color 0.11
Convert::Color::CMY 0.11
Convert::Color::CMYK 0.11
Convert::Color::HSL 0.11
Convert::Color::HSV 0.11
Convert::Color::RGB 0.11
Convert::Color::RGB16 0.11
Convert::Color::RGB8 0.11
Convert::Color::VGA 0.11
Convert::Color::X11 0.11
Convert::Color::XTerm 0.05
Convert::UU 0.5201
Cookie::Baker 0.09
CPAN::Changes 0.400002
CPAN::Changes::Group unknown
CPAN::Changes::Release unknown
CPAN::Checksums 2.12
CPAN::Common::Index 0.010
CPAN::Common::Index::LocalPackage 0.010
CPAN::Common::Index::MetaDB 0.010
CPAN::Common::Index::Mirror 0.010
CPAN::Common::Index::Mux::Ordered 0.010
CPAN::DistnameInfo 0.12
CPAN::Meta::Check 0.014
CPAN::Mini 1.111016
CPAN::Mini::App 1.111016
CPAN::Perl::Releases 3.68
CPAN::Uploader 0.103013
Cpanel::JSON::XS 4.04
Cpanel::JSON::XS::Type unknown
Crypt::Blowfish 2.14
Crypt::CBC 2.33
Crypt::PasswdMD5 1.40
Crypt::Random::Seed 0.03
Crypt::Random::TESHA2 0.01
Crypt::Random::TESHA2::Config 0.01
Crypt::RC4 2.02
CSS::Tiny 1.20
curry 1.001000
curry::weak unknown
Curses::Window 1.36
Cwd 3.74
Cwd::Guard 0.05
Dancer 1.3400
Dancer2 0.206000
Dancer2::CLI 0.206000
Dancer2::CLI::Command::gen 0.206000
Dancer2::CLI::Command::version 0.206000
Dancer2::Core 0.206000
Dancer2::Core::App 0.206000
Dancer2::Core::Cookie 0.206000
Dancer2::Core::Dispatcher 0.206000
Dancer2::Core::DSL 0.206000
Dancer2::Core::Error 0.206000
Dancer2::Core::Factory 0.206000
Dancer2::Core::Hook 0.206000
Dancer2::Core::HTTP 0.206000
Dancer2::Core::MIME 0.206000
Dancer2::Core::Request 0.206000
Dancer2::Core::Request::Upload 0.206000
Dancer2::Core::Response 0.206000
Dancer2::Core::Response::Delayed 0.206000
Dancer2::Core::Role::ConfigReader 0.206000
Dancer2::Core::Role::DSL 0.206000
Dancer2::Core::Role::Engine 0.206000
Dancer2::Core::Role::Handler 0.206000
Dancer2::Core::Role::HasLocation 0.206000
Dancer2::Core::Role::Hookable 0.206000
Dancer2::Core::Role::Logger 0.206000
Dancer2::Core::Role::Serializer 0.206000
Dancer2::Core::Role::SessionFactory 0.206000
Dancer2::Core::Role::SessionFactory::File 0.206000
Dancer2::Core::Role::StandardResponses 0.206000
Dancer2::Core::Role::Template 0.206000
Dancer2::Core::Route 0.206000
Dancer2::Core::Runner 0.206000
Dancer2::Core::Session 0.206000
Dancer2::Core::Time 0.206000
Dancer2::Core::Types 0.206000
Dancer2::FileUtils 0.206000
Dancer2::Handler::AutoPage 0.206000
Dancer2::Handler::File 0.206000
Dancer2::Logger::Capture 0.206000
Dancer2::Logger::Capture::Trap 0.206000
Dancer2::Logger::Console 0.206000
Dancer2::Logger::Diag 0.206000
Dancer2::Logger::File 0.206000
Dancer2::Logger::LogReport 1.27
Dancer2::Logger::Note 0.206000
Dancer2::Logger::Null 0.206000
Dancer2::Plugin 0.206000
Dancer2::Plugin::LogReport 1.27
Dancer2::Plugin::LogReport::Message 1.27
Dancer2::Serializer::Dumper 0.206000
Dancer2::Serializer::JSON 0.206000
Dancer2::Serializer::Mutable 0.206000
Dancer2::Serializer::YAML 0.206000
Dancer2::Session::Simple 0.206000
Dancer2::Session::YAML 0.206000
Dancer2::Template::Implementation::ForkedTiny 0.206000
Dancer2::Template::Simple 0.206000
Dancer2::Template::TemplateToolkit 0.206000
Dancer2::Template::Tiny 0.206000
Dancer2::Test 0.206000
Dancer::App 1.3400
Dancer::Config 1.3400
Dancer::Config::Object 1.3400
Dancer::Continuation 1.3400
Dancer::Continuation::Halted 1.3400
Dancer::Continuation::Route 1.3400
Dancer::Continuation::Route::ErrorSent 1.3400
Dancer::Continuation::Route::FileSent 1.3400
Dancer::Continuation::Route::Forwarded 1.3400
Dancer::Continuation::Route::Passed 1.3400
Dancer::Continuation::Route::Templated 1.3400
Dancer::Cookie 1.3400
Dancer::Cookies 1.3400
Dancer::Deprecation 1.3400
Dancer::Engine 1.3400
Dancer::Error 1.3400
Dancer::Exception 1.3400
Dancer::Exception::Base 1.3400
Dancer::Factory::Hook 1.3400
Dancer::FileUtils 1.3400
Dancer::GetOpt 1.3400
Dancer::Handler 1.3400
Dancer::Handler::Debug 1.3400
Dancer::Handler::PSGI 1.3400
Dancer::Handler::Standalone 1.3400
Dancer::Hook 1.3400
Dancer::Hook::Properties 1.3400
Dancer::HTTP 1.3400
Dancer::Logger 1.3400
Dancer::Logger::Abstract 1.3400
Dancer::Logger::Capture 1.3400
Dancer::Logger::Capture::Trap 1.3400
Dancer::Logger::Console 1.3400
Dancer::Logger::Diag 1.3400
Dancer::Logger::File 1.3400
Dancer::Logger::LogReport 1.27
Dancer::Logger::Note 1.3400
Dancer::Logger::Null 1.3400
Dancer::MIME 1.3400
Dancer::ModuleLoader 1.3400
Dancer::Object 1.3400
Dancer::Object::Singleton 1.3400
Dancer::Plugin 1.3400
Dancer::Plugin::Ajax 1.3400
Dancer::Renderer 1.3400
Dancer::Request 1.3400
Dancer::Request::Upload 1.3400
Dancer::Response 1.3400
Dancer::Route 1.3400
Dancer::Route::Cache 1.3400
Dancer::Route::Registry 1.3400
Dancer::Serializer 1.3400
Dancer::Serializer::Abstract 1.3400
Dancer::Serializer::Dumper 1.3400
Dancer::Serializer::JSON 1.3400
Dancer::Serializer::JSONP 1.3400
Dancer::Serializer::Mutable 1.3400
Dancer::Serializer::XML 1.3400
Dancer::Serializer::YAML 1.3400
Dancer::Session 1.3400
Dancer::Session::Abstract 1.3400
Dancer::Session::Simple 1.3400
Dancer::Session::YAML 1.3400
Dancer::SharedData 1.3400
Dancer::Template 1.3400
Dancer::Template::Abstract 1.3400
Dancer::Template::Simple 1.3400
Dancer::Template::TemplateToolkit 1.3400
Dancer::Test 1.3400
Dancer::Timer 1.3400
Data::Clone 0.004
Data::Compare 1.25
Data::Compare::Plugins::Scalar::Properties 1
Data::Dump 1.23
Data::Dump::FilterContext unknown
Data::Dump::Filtered unknown
Data::Dump::Streamer 2.40
Data::Dump::Streamer::_::StringPrinter 0.1
Data::Dump::Trace 0.02
Data::Dumper::Again 0.01
Data::Dumper::Concise 2.023
Data::Dumper::Concise::Sugar 2.023
Data::Dumper::Perltidy 0.03
Data::Grove 0.08
Data::Grove::Parent 0.08
Data::Grove::Visitor 0.08
Data::Munge 0.097
Data::OptList 0.110
Data::Page 2.02
Data::Paginator 0.08
Data::Paginator::Types 0.08
Data::Perl 0.002009
Data::Perl::Bool 0.002009
Data::Perl::Bool::MooseLike 0.001008
Data::Perl::Code 0.002009
Data::Perl::Collection::Array 0.002009
Data::Perl::Collection::Array::MooseLike 0.001008
Data::Perl::Collection::Hash 0.002009
Data::Perl::Collection::Hash::MooseLike 0.001008
Data::Perl::Counter 0.002009
Data::Perl::Number 0.002009
Data::Perl::Number::MooseLike 0.001008
Data::Perl::Role::Bool 0.002009
Data::Perl::Role::Code 0.002009
Data::Perl::Role::Collection::Array 0.002009
Data::Perl::Role::Collection::Hash 0.002009
Data::Perl::Role::Counter 0.002009
Data::Perl::Role::Number 0.002009
Data::Perl::Role::String 0.002009
Data::Perl::String 0.002009
Data::Perl::String::MooseLike 0.001008
Data::PowerSet 0.05
Data::Printer 0.40
Data::Printer::Filter unknown
Data::Printer::Filter::DateTime unknown
Data::Printer::Filter::DB unknown
Data::Printer::Filter::Digest unknown
Data::Section 0.200007
Data::Stag 0.14
Data::Stag::Arr2HTML 0.14
Data::Stag::Base 0.14
Data::Stag::BaseGenerator unknown
Data::Stag::ChainHandler 0.14
Data::Stag::DTDWriter 0.14
Data::Stag::GraphHandler 0.14
Data::Stag::HashDB 0.14
Data::Stag::IndentParser 0.14
Data::Stag::IndentWriter 0.14
Data::Stag::ITextParser 0.14
Data::Stag::ITextWriter 0.14
Data::Stag::JSONWriter 0.14
Data::Stag::null 0.14
Data::Stag::PerlWriter 0.14
Data::Stag::PodParser 0.14
Data::Stag::SAX2Stag 0.14
Data::Stag::Simple 0.14
Data::Stag::StagDB 0.14
Data::Stag::StagI unknown
Data::Stag::StagImpl 0.14
Data::Stag::SxprParser 0.14
Data::Stag::SxprWriter 0.14
Data::Stag::Util 0.14
Data::Stag::Writer 0.14
Data::Stag::XMLParser 0.14
Data::Stag::XMLWriter 0.14
Data::Stag::XSLHandler unknown
Data::Stag::XSLTHandler unknown
Data::UUID 1.221
Data::Validate::Domain 0.14
Data::Validate::IP 0.27
Data::Validate::URI 0.07
Data::Visitor 0.30
Data::Visitor::Callback 0.30
Date::Format 2.24
Date::Language 1.10
Date::Language::Afar 0.99
Date::Language::Amharic 1.00
Date::Language::Austrian 1.01
Date::Language::Brazilian 1.01
Date::Language::Bulgarian 1.01
Date::Language::Chinese 1.00
Date::Language::Chinese_GB 1.01
Date::Language::Czech 1.01
Date::Language::Danish 1.01
Date::Language::Dutch 1.02
Date::Language::English 1.01
Date::Language::Finnish 1.01
Date::Language::French 1.04
Date::Language::Gedeo 0.99
Date::Language::German 1.02
Date::Language::Greek 1.00
Date::Language::Hungarian 1.01
Date::Language::Icelandic 1.01
Date::Language::Italian 1.01
Date::Language::Norwegian 1.01
Date::Language::Oromo 0.99
Date::Language::Romanian 1.01
Date::Language::Russian 1.01
Date::Language::Russian_cp1251 1.01
Date::Language::Russian_koi8r 1.01
Date::Language::Sidama 0.99
Date::Language::Somali 0.99
Date::Language::Spanish 1.00
Date::Language::Swedish 1.01
Date::Language::Tigrinya 1.00
Date::Language::TigrinyaEritrean 1.00
Date::Language::TigrinyaEthiopian 1.00
Date::Language::Turkish 1.0
Date::Parse 2.30
Date::Tiny 1.07
DateTime 1.49
DateTime::Astro 1.03
DateTime::Astro unknown
DateTime::Calendar::Chinese 1.00
DateTime::Calendar::Japanese::Era 0.08003
DateTime::Duration 1.49
DateTime::Event::Chinese 1.00
DateTime::Event::ICal 0.13
DateTime::Event::SolarTerm unknown
DateTime::Format::Builder 0.81
DateTime::Format::Builder::Parser 0.81
DateTime::Format::Builder::Parser::Dispatch 0.81
DateTime::Format::Builder::Parser::generic 0.81
DateTime::Format::Builder::Parser::Quick 0.81
DateTime::Format::Builder::Parser::Regex 0.81
DateTime::Format::Builder::Parser::Strptime 0.81
DateTime::Format::DateParse 0.05
DateTime::Format::Duration 1.04
DateTime::Format::Epoch 0.16
DateTime::Format::Epoch::ActiveDirectory 0.13
DateTime::Format::Epoch::DotNet 0.13
DateTime::Format::Epoch::JD 0.13
DateTime::Format::Epoch::Lilian 0.13
DateTime::Format::Epoch::MacOS 0.13
DateTime::Format::Epoch::MJD 0.13
DateTime::Format::Epoch::NTP 0.14
DateTime::Format::Epoch::RataDie 0.13
DateTime::Format::Epoch::RJD 0.13
DateTime::Format::Epoch::TAI64 0.13
DateTime::Format::Epoch::TJD 0.13
DateTime::Format::Epoch::Unix 0.13
DateTime::Format::Flexible 0.30
DateTime::Format::Flexible::lang unknown
DateTime::Format::Flexible::lang::de unknown
DateTime::Format::Flexible::lang::en unknown
DateTime::Format::Flexible::lang::es unknown
DateTime::Format::ICal 0.09
DateTime::Format::Mail 0.403
DateTime::Format::Natural 1.05
DateTime::Format::Natural::Calc 1.41
DateTime::Format::Natural::Compat 0.07
DateTime::Format::Natural::Duration 0.06
DateTime::Format::Natural::Duration::Checks 0.04
DateTime::Format::Natural::Expand 0.03
DateTime::Format::Natural::Extract 0.11
DateTime::Format::Natural::Formatted 0.07
DateTime::Format::Natural::Helpers 0.06
DateTime::Format::Natural::Lang::Base 1.08
DateTime::Format::Natural::Lang::EN 1.62
DateTime::Format::Natural::Rewrite 0.06
DateTime::Format::Natural::Test 0.10
DateTime::Format::Natural::Utils 0.05
DateTime::Format::Natural::Wrappers 0.03
DateTime::Format::Strptime 1.75
DateTime::Format::Strptime::Types 1.75
DateTime::Format::W3CDTF 0.07
DateTime::Helpers 1.49
DateTime::Infinite 1.49
DateTime::LeapSecond 1.49
DateTime::Locale 1.22
DateTime::Locale::Base 1.22
DateTime::Locale::Catalog 1.22
DateTime::Locale::Data 1.22
DateTime::Locale::FromData 1.22
DateTime::Locale::Util 1.22
DateTime::PP 1.49
DateTime::PPExtra 1.49
DateTime::Set 0.3900
DateTime::Set::ICal 0.19
DateTime::Span unknown
DateTime::SpanSet unknown
DateTime::TimeZone 2.19
DateTime::TimeZone::Africa::Abidjan 2.19
DateTime::TimeZone::Africa::Accra 2.19
DateTime::TimeZone::Africa::Algiers 2.19
DateTime::TimeZone::Africa::Bissau 2.19
DateTime::TimeZone::Africa::Cairo 2.19
DateTime::TimeZone::Africa::Casablanca 2.19
DateTime::TimeZone::Africa::Ceuta 2.19
DateTime::TimeZone::Africa::El_Aaiun 2.19
DateTime::TimeZone::Africa::Johannesburg 2.19
DateTime::TimeZone::Africa::Juba 2.19
DateTime::TimeZone::Africa::Khartoum 2.19
DateTime::TimeZone::Africa::Lagos 2.19
DateTime::TimeZone::Africa::Maputo 2.19
DateTime::TimeZone::Africa::Monrovia 2.19
DateTime::TimeZone::Africa::Nairobi 2.19
DateTime::TimeZone::Africa::Ndjamena 2.19
DateTime::TimeZone::Africa::Sao_Tome 2.19
DateTime::TimeZone::Africa::Tripoli 2.19
DateTime::TimeZone::Africa::Tunis 2.19
DateTime::TimeZone::Africa::Windhoek 2.19
DateTime::TimeZone::America::Adak 2.19
DateTime::TimeZone::America::Anchorage 2.19
DateTime::TimeZone::America::Araguaina 2.19
DateTime::TimeZone::America::Argentina::Buenos_Aires 2.19
DateTime::TimeZone::America::Argentina::Catamarca 2.19
DateTime::TimeZone::America::Argentina::Cordoba 2.19
DateTime::TimeZone::America::Argentina::Jujuy 2.19
DateTime::TimeZone::America::Argentina::La_Rioja 2.19
DateTime::TimeZone::America::Argentina::Mendoza 2.19
DateTime::TimeZone::America::Argentina::Rio_Gallegos 2.19
DateTime::TimeZone::America::Argentina::Salta 2.19
DateTime::TimeZone::America::Argentina::San_Juan 2.19
DateTime::TimeZone::America::Argentina::San_Luis 2.19
DateTime::TimeZone::America::Argentina::Tucuman 2.19
DateTime::TimeZone::America::Argentina::Ushuaia 2.19
DateTime::TimeZone::America::Asuncion 2.19
DateTime::TimeZone::America::Atikokan 2.19
DateTime::TimeZone::America::Bahia 2.19
DateTime::TimeZone::America::Bahia_Banderas 2.19
DateTime::TimeZone::America::Barbados 2.19
DateTime::TimeZone::America::Belem 2.19
DateTime::TimeZone::America::Belize 2.19
DateTime::TimeZone::America::Blanc_Sablon 2.19
DateTime::TimeZone::America::Boa_Vista 2.19
DateTime::TimeZone::America::Bogota 2.19
DateTime::TimeZone::America::Boise 2.19
DateTime::TimeZone::America::Cambridge_Bay 2.19
DateTime::TimeZone::America::Campo_Grande 2.19
DateTime::TimeZone::America::Cancun 2.19
DateTime::TimeZone::America::Caracas 2.19
DateTime::TimeZone::America::Cayenne 2.19
DateTime::TimeZone::America::Chicago 2.19
DateTime::TimeZone::America::Chihuahua 2.19
DateTime::TimeZone::America::Costa_Rica 2.19
DateTime::TimeZone::America::Creston 2.19
DateTime::TimeZone::America::Cuiaba 2.19
DateTime::TimeZone::America::Curacao 2.19
DateTime::TimeZone::America::Danmarkshavn 2.19
DateTime::TimeZone::America::Dawson 2.19
DateTime::TimeZone::America::Dawson_Creek 2.19
DateTime::TimeZone::America::Denver 2.19
DateTime::TimeZone::America::Detroit 2.19
DateTime::TimeZone::America::Edmonton 2.19
DateTime::TimeZone::America::Eirunepe 2.19
DateTime::TimeZone::America::El_Salvador 2.19
DateTime::TimeZone::America::Fort_Nelson 2.19
DateTime::TimeZone::America::Fortaleza 2.19
DateTime::TimeZone::America::Glace_Bay 2.19
DateTime::TimeZone::America::Godthab 2.19
DateTime::TimeZone::America::Goose_Bay 2.19
DateTime::TimeZone::America::Grand_Turk 2.19
DateTime::TimeZone::America::Guatemala 2.19
DateTime::TimeZone::America::Guayaquil 2.19
DateTime::TimeZone::America::Guyana 2.19
DateTime::TimeZone::America::Halifax 2.19
DateTime::TimeZone::America::Havana 2.19
DateTime::TimeZone::America::Hermosillo 2.19
DateTime::TimeZone::America::Indiana::Indianapolis 2.19
DateTime::TimeZone::America::Indiana::Knox 2.19
DateTime::TimeZone::America::Indiana::Marengo 2.19
DateTime::TimeZone::America::Indiana::Petersburg 2.19
DateTime::TimeZone::America::Indiana::Tell_City 2.19
DateTime::TimeZone::America::Indiana::Vevay 2.19
DateTime::TimeZone::America::Indiana::Vincennes 2.19
DateTime::TimeZone::America::Indiana::Winamac 2.19
DateTime::TimeZone::America::Inuvik 2.19
DateTime::TimeZone::America::Iqaluit 2.19
DateTime::TimeZone::America::Jamaica 2.19
DateTime::TimeZone::America::Juneau 2.19
DateTime::TimeZone::America::Kentucky::Louisville 2.19
DateTime::TimeZone::America::Kentucky::Monticello 2.19
DateTime::TimeZone::America::La_Paz 2.19
DateTime::TimeZone::America::Lima 2.19
DateTime::TimeZone::America::Los_Angeles 2.19
DateTime::TimeZone::America::Maceio 2.19
DateTime::TimeZone::America::Managua 2.19
DateTime::TimeZone::America::Manaus 2.19
DateTime::TimeZone::America::Martinique 2.19
DateTime::TimeZone::America::Matamoros 2.19
DateTime::TimeZone::America::Mazatlan 2.19
DateTime::TimeZone::America::Menominee 2.19
DateTime::TimeZone::America::Merida 2.19
DateTime::TimeZone::America::Metlakatla 2.19
DateTime::TimeZone::America::Mexico_City 2.19
DateTime::TimeZone::America::Miquelon 2.19
DateTime::TimeZone::America::Moncton 2.19
DateTime::TimeZone::America::Monterrey 2.19
DateTime::TimeZone::America::Montevideo 2.19
DateTime::TimeZone::America::Nassau 2.19
DateTime::TimeZone::America::New_York 2.19
DateTime::TimeZone::America::Nipigon 2.19
DateTime::TimeZone::America::Nome 2.19
DateTime::TimeZone::America::Noronha 2.19
DateTime::TimeZone::America::North_Dakota::Beulah 2.19
DateTime::TimeZone::America::North_Dakota::Center 2.19
DateTime::TimeZone::America::North_Dakota::New_Salem 2.19
DateTime::TimeZone::America::Ojinaga 2.19
DateTime::TimeZone::America::Panama 2.19
DateTime::TimeZone::America::Pangnirtung 2.19
DateTime::TimeZone::America::Paramaribo 2.19
DateTime::TimeZone::America::Phoenix 2.19
DateTime::TimeZone::America::Port_au_Prince 2.19
DateTime::TimeZone::America::Port_of_Spain 2.19
DateTime::TimeZone::America::Porto_Velho 2.19
DateTime::TimeZone::America::Puerto_Rico 2.19
DateTime::TimeZone::America::Punta_Arenas 2.19
DateTime::TimeZone::America::Rainy_River 2.19
DateTime::TimeZone::America::Rankin_Inlet 2.19
DateTime::TimeZone::America::Recife 2.19
DateTime::TimeZone::America::Regina 2.19
DateTime::TimeZone::America::Resolute 2.19
DateTime::TimeZone::America::Rio_Branco 2.19
DateTime::TimeZone::America::Santarem 2.19
DateTime::TimeZone::America::Santiago 2.19
DateTime::TimeZone::America::Santo_Domingo 2.19
DateTime::TimeZone::America::Sao_Paulo 2.19
DateTime::TimeZone::America::Scoresbysund 2.19
DateTime::TimeZone::America::Sitka 2.19
DateTime::TimeZone::America::St_Johns 2.19
DateTime::TimeZone::America::Swift_Current 2.19
DateTime::TimeZone::America::Tegucigalpa 2.19
DateTime::TimeZone::America::Thule 2.19
DateTime::TimeZone::America::Thunder_Bay 2.19
DateTime::TimeZone::America::Tijuana 2.19
DateTime::TimeZone::America::Toronto 2.19
DateTime::TimeZone::America::Vancouver 2.19
DateTime::TimeZone::America::Whitehorse 2.19
DateTime::TimeZone::America::Winnipeg 2.19
DateTime::TimeZone::America::Yakutat 2.19
DateTime::TimeZone::America::Yellowknife 2.19
DateTime::TimeZone::Antarctica::Casey 2.19
DateTime::TimeZone::Antarctica::Davis 2.19
DateTime::TimeZone::Antarctica::DumontDUrville 2.19
DateTime::TimeZone::Antarctica::Macquarie 2.19
DateTime::TimeZone::Antarctica::Mawson 2.19
DateTime::TimeZone::Antarctica::Palmer 2.19
DateTime::TimeZone::Antarctica::Rothera 2.19
DateTime::TimeZone::Antarctica::Syowa 2.19
DateTime::TimeZone::Antarctica::Troll 2.19
DateTime::TimeZone::Antarctica::Vostok 2.19
DateTime::TimeZone::Asia::Almaty 2.19
DateTime::TimeZone::Asia::Amman 2.19
DateTime::TimeZone::Asia::Anadyr 2.19
DateTime::TimeZone::Asia::Aqtau 2.19
DateTime::TimeZone::Asia::Aqtobe 2.19
DateTime::TimeZone::Asia::Ashgabat 2.19
DateTime::TimeZone::Asia::Atyrau 2.19
DateTime::TimeZone::Asia::Baghdad 2.19
DateTime::TimeZone::Asia::Baku 2.19
DateTime::TimeZone::Asia::Bangkok 2.19
DateTime::TimeZone::Asia::Barnaul 2.19
DateTime::TimeZone::Asia::Beirut 2.19
DateTime::TimeZone::Asia::Bishkek 2.19
DateTime::TimeZone::Asia::Brunei 2.19
DateTime::TimeZone::Asia::Chita 2.19
DateTime::TimeZone::Asia::Choibalsan 2.19
DateTime::TimeZone::Asia::Colombo 2.19
DateTime::TimeZone::Asia::Damascus 2.19
DateTime::TimeZone::Asia::Dhaka 2.19
DateTime::TimeZone::Asia::Dili 2.19
DateTime::TimeZone::Asia::Dubai 2.19
DateTime::TimeZone::Asia::Dushanbe 2.19
DateTime::TimeZone::Asia::Famagusta 2.19
DateTime::TimeZone::Asia::Gaza 2.19
DateTime::TimeZone::Asia::Hebron 2.19
DateTime::TimeZone::Asia::Ho_Chi_Minh 2.19
DateTime::TimeZone::Asia::Hong_Kong 2.19
DateTime::TimeZone::Asia::Hovd 2.19
DateTime::TimeZone::Asia::Irkutsk 2.19
DateTime::TimeZone::Asia::Jakarta 2.19
DateTime::TimeZone::Asia::Jayapura 2.19
DateTime::TimeZone::Asia::Jerusalem 2.19
DateTime::TimeZone::Asia::Kabul 2.19
DateTime::TimeZone::Asia::Kamchatka 2.19
DateTime::TimeZone::Asia::Karachi 2.19
DateTime::TimeZone::Asia::Kathmandu 2.19
DateTime::TimeZone::Asia::Khandyga 2.19
DateTime::TimeZone::Asia::Kolkata 2.19
DateTime::TimeZone::Asia::Krasnoyarsk 2.19
DateTime::TimeZone::Asia::Kuala_Lumpur 2.19
DateTime::TimeZone::Asia::Kuching 2.19
DateTime::TimeZone::Asia::Macau 2.19
DateTime::TimeZone::Asia::Magadan 2.19
DateTime::TimeZone::Asia::Makassar 2.19
DateTime::TimeZone::Asia::Manila 2.19
DateTime::TimeZone::Asia::Nicosia 2.19
DateTime::TimeZone::Asia::Novokuznetsk 2.19
DateTime::TimeZone::Asia::Novosibirsk 2.19
DateTime::TimeZone::Asia::Omsk 2.19
DateTime::TimeZone::Asia::Oral 2.19
DateTime::TimeZone::Asia::Pontianak 2.19
DateTime::TimeZone::Asia::Pyongyang 2.19
DateTime::TimeZone::Asia::Qatar 2.19
DateTime::TimeZone::Asia::Qyzylorda 2.19
DateTime::TimeZone::Asia::Riyadh 2.19
DateTime::TimeZone::Asia::Sakhalin 2.19
DateTime::TimeZone::Asia::Samarkand 2.19
DateTime::TimeZone::Asia::Seoul 2.19
DateTime::TimeZone::Asia::Shanghai 2.19
DateTime::TimeZone::Asia::Singapore 2.19
DateTime::TimeZone::Asia::Srednekolymsk 2.19
DateTime::TimeZone::Asia::Taipei 2.19
DateTime::TimeZone::Asia::Tashkent 2.19
DateTime::TimeZone::Asia::Tbilisi 2.19
DateTime::TimeZone::Asia::Tehran 2.19
DateTime::TimeZone::Asia::Thimphu 2.19
DateTime::TimeZone::Asia::Tokyo 2.19
DateTime::TimeZone::Asia::Tomsk 2.19
DateTime::TimeZone::Asia::Ulaanbaatar 2.19
DateTime::TimeZone::Asia::Urumqi 2.19
DateTime::TimeZone::Asia::Ust_Nera 2.19
DateTime::TimeZone::Asia::Vladivostok 2.19
DateTime::TimeZone::Asia::Yakutsk 2.19
DateTime::TimeZone::Asia::Yangon 2.19
DateTime::TimeZone::Asia::Yekaterinburg 2.19
DateTime::TimeZone::Asia::Yerevan 2.19
DateTime::TimeZone::Atlantic::Azores 2.19
DateTime::TimeZone::Atlantic::Bermuda 2.19
DateTime::TimeZone::Atlantic::Canary 2.19
DateTime::TimeZone::Atlantic::Cape_Verde 2.19
DateTime::TimeZone::Atlantic::Faroe 2.19
DateTime::TimeZone::Atlantic::Madeira 2.19
DateTime::TimeZone::Atlantic::Reykjavik 2.19
DateTime::TimeZone::Atlantic::South_Georgia 2.19
DateTime::TimeZone::Atlantic::Stanley 2.19
DateTime::TimeZone::Australia::Adelaide 2.19
DateTime::TimeZone::Australia::Brisbane 2.19
DateTime::TimeZone::Australia::Broken_Hill 2.19
DateTime::TimeZone::Australia::Currie 2.19
DateTime::TimeZone::Australia::Darwin 2.19
DateTime::TimeZone::Australia::Eucla 2.19
DateTime::TimeZone::Australia::Hobart 2.19
DateTime::TimeZone::Australia::Lindeman 2.19
DateTime::TimeZone::Australia::Lord_Howe 2.19
DateTime::TimeZone::Australia::Melbourne 2.19
DateTime::TimeZone::Australia::Perth 2.19
DateTime::TimeZone::Australia::Sydney 2.19
DateTime::TimeZone::Catalog 2.19
DateTime::TimeZone::CET 2.19
DateTime::TimeZone::CST6CDT 2.19
DateTime::TimeZone::EET 2.19
DateTime::TimeZone::EST 2.19
DateTime::TimeZone::EST5EDT 2.19
DateTime::TimeZone::Europe::Amsterdam 2.19
DateTime::TimeZone::Europe::Andorra 2.19
DateTime::TimeZone::Europe::Astrakhan 2.19
DateTime::TimeZone::Europe::Athens 2.19
DateTime::TimeZone::Europe::Belgrade 2.19
DateTime::TimeZone::Europe::Berlin 2.19
DateTime::TimeZone::Europe::Brussels 2.19
DateTime::TimeZone::Europe::Bucharest 2.19
DateTime::TimeZone::Europe::Budapest 2.19
DateTime::TimeZone::Europe::Chisinau 2.19
DateTime::TimeZone::Europe::Copenhagen 2.19
DateTime::TimeZone::Europe::Dublin 2.19
DateTime::TimeZone::Europe::Gibraltar 2.19
DateTime::TimeZone::Europe::Helsinki 2.19
DateTime::TimeZone::Europe::Istanbul 2.19
DateTime::TimeZone::Europe::Kaliningrad 2.19
DateTime::TimeZone::Europe::Kiev 2.19
DateTime::TimeZone::Europe::Kirov 2.19
DateTime::TimeZone::Europe::Lisbon 2.19
DateTime::TimeZone::Europe::London 2.19
DateTime::TimeZone::Europe::Luxembourg 2.19
DateTime::TimeZone::Europe::Madrid 2.19
DateTime::TimeZone::Europe::Malta 2.19
DateTime::TimeZone::Europe::Minsk 2.19
DateTime::TimeZone::Europe::Monaco 2.19
DateTime::TimeZone::Europe::Moscow 2.19
DateTime::TimeZone::Europe::Oslo 2.19
DateTime::TimeZone::Europe::Paris 2.19
DateTime::TimeZone::Europe::Prague 2.19
DateTime::TimeZone::Europe::Riga 2.19
DateTime::TimeZone::Europe::Rome 2.19
DateTime::TimeZone::Europe::Samara 2.19
DateTime::TimeZone::Europe::Saratov 2.19
DateTime::TimeZone::Europe::Simferopol 2.19
DateTime::TimeZone::Europe::Sofia 2.19
DateTime::TimeZone::Europe::Stockholm 2.19
DateTime::TimeZone::Europe::Tallinn 2.19
DateTime::TimeZone::Europe::Tirane 2.19
DateTime::TimeZone::Europe::Ulyanovsk 2.19
DateTime::TimeZone::Europe::Uzhgorod 2.19
DateTime::TimeZone::Europe::Vienna 2.19
DateTime::TimeZone::Europe::Vilnius 2.19
DateTime::TimeZone::Europe::Volgograd 2.19
DateTime::TimeZone::Europe::Warsaw 2.19
DateTime::TimeZone::Europe::Zaporozhye 2.19
DateTime::TimeZone::Europe::Zurich 2.19
DateTime::TimeZone::Floating 2.19
DateTime::TimeZone::HST 2.19
DateTime::TimeZone::Indian::Chagos 2.19
DateTime::TimeZone::Indian::Christmas 2.19
DateTime::TimeZone::Indian::Cocos 2.19
DateTime::TimeZone::Indian::Kerguelen 2.19
DateTime::TimeZone::Indian::Mahe 2.19
DateTime::TimeZone::Indian::Maldives 2.19
DateTime::TimeZone::Indian::Mauritius 2.19
DateTime::TimeZone::Indian::Reunion 2.19
DateTime::TimeZone::Local 2.19
DateTime::TimeZone::Local::Android 2.19
DateTime::TimeZone::Local::Unix 2.19
DateTime::TimeZone::Local::VMS 2.19
DateTime::TimeZone::MET 2.19
DateTime::TimeZone::MST 2.19
DateTime::TimeZone::MST7MDT 2.19
DateTime::TimeZone::OffsetOnly 2.19
DateTime::TimeZone::OlsonDB 2.19
DateTime::TimeZone::OlsonDB::Change 2.19
DateTime::TimeZone::OlsonDB::Observance 2.19
DateTime::TimeZone::OlsonDB::Rule 2.19
DateTime::TimeZone::OlsonDB::Zone 2.19
DateTime::TimeZone::Pacific::Apia 2.19
DateTime::TimeZone::Pacific::Auckland 2.19
DateTime::TimeZone::Pacific::Bougainville 2.19
DateTime::TimeZone::Pacific::Chatham 2.19
DateTime::TimeZone::Pacific::Chuuk 2.19
DateTime::TimeZone::Pacific::Easter 2.19
DateTime::TimeZone::Pacific::Efate 2.19
DateTime::TimeZone::Pacific::Enderbury 2.19
DateTime::TimeZone::Pacific::Fakaofo 2.19
DateTime::TimeZone::Pacific::Fiji 2.19
DateTime::TimeZone::Pacific::Funafuti 2.19
DateTime::TimeZone::Pacific::Galapagos 2.19
DateTime::TimeZone::Pacific::Gambier 2.19
DateTime::TimeZone::Pacific::Guadalcanal 2.19
DateTime::TimeZone::Pacific::Guam 2.19
DateTime::TimeZone::Pacific::Honolulu 2.19
DateTime::TimeZone::Pacific::Kiritimati 2.19
DateTime::TimeZone::Pacific::Kosrae 2.19
DateTime::TimeZone::Pacific::Kwajalein 2.19
DateTime::TimeZone::Pacific::Majuro 2.19
DateTime::TimeZone::Pacific::Marquesas 2.19
DateTime::TimeZone::Pacific::Nauru 2.19
DateTime::TimeZone::Pacific::Niue 2.19
DateTime::TimeZone::Pacific::Norfolk 2.19
DateTime::TimeZone::Pacific::Noumea 2.19
DateTime::TimeZone::Pacific::Pago_Pago 2.19
DateTime::TimeZone::Pacific::Palau 2.19
DateTime::TimeZone::Pacific::Pitcairn 2.19
DateTime::TimeZone::Pacific::Pohnpei 2.19
DateTime::TimeZone::Pacific::Port_Moresby 2.19
DateTime::TimeZone::Pacific::Rarotonga 2.19
DateTime::TimeZone::Pacific::Tahiti 2.19
DateTime::TimeZone::Pacific::Tarawa 2.19
DateTime::TimeZone::Pacific::Tongatapu 2.19
DateTime::TimeZone::Pacific::Wake 2.19
DateTime::TimeZone::Pacific::Wallis 2.19
DateTime::TimeZone::PST8PDT 2.19
DateTime::TimeZone::UTC 2.19
DateTime::TimeZone::WET 2.19
DateTime::Tiny 1.07
DateTime::Types 1.49
DateTimeX::Easy 0.089
DB unknown
DBD::DBM 0.08
DBD::ExampleP 12.014311
DBD::File 0.44
DBD::Gofer 0.015327
DBD::Gofer::Policy::Base 0.010088
DBD::Gofer::Policy::classic 0.010088
DBD::Gofer::Policy::pedantic 0.010088
DBD::Gofer::Policy::rush 0.010088
DBD::Gofer::Transport::Base 0.014121
DBD::Gofer::Transport::corostream unknown
DBD::Gofer::Transport::null 0.010088
DBD::Gofer::Transport::pipeone 0.010088
DBD::Gofer::Transport::stream 0.014599
DBD::Mem 0.001
DBD::mysql 4.046
DBD::mysql::GetInfo unknown
DBD::NullP 12.014715
DBD::Proxy 0.2004
DBD::Sponge 12.010003
DBD::SQLite 1.58
DBD::SQLite::Constants unknown
DBD::SQLite::VirtualTable 1.58
DBD::SQLite::VirtualTable::FileContent unknown
DBD::SQLite::VirtualTable::PerlData unknown
DBI unknown
DBI::Const::GetInfo::ANSI 2.008697
DBI::Const::GetInfo::ODBC 2.011374
DBI::Const::GetInfoReturn 2.008697
DBI::Const::GetInfoType 2.008697
DBI::DBD 12.015129
DBI::DBD::Metadata 2.014214
DBI::DBD::SqlEngine 0.06
DBI::FAQ 1.014935
DBI::Gofer::Execute 0.014283
DBI::Gofer::Request 0.012537
DBI::Gofer::Response 0.011566
DBI::Gofer::Serializer::Base 0.009950
DBI::Gofer::Serializer::DataDumper 0.009950
DBI::Gofer::Serializer::Storable 0.015586
DBI::Gofer::Transport::Base 0.012537
DBI::Gofer::Transport::pipeone 0.012537
DBI::Gofer::Transport::stream 0.012537
DBI::Profile 2.015065
DBI::ProfileData 2.010008
DBI::ProfileDumper 2.015325
DBI::ProfileDumper::Apache 2.014121
DBI::ProfileSubs 0.009396
DBI::ProxyServer 0.3005
DBI::SQL::Nano 1.015544
DBI::Util::_accessor 0.009479
DBI::Util::CacheMemory 0.010315
DBIx::Class 0.082841
DBIx::Class::AccessorGroup unknown
DBIx::Class::Admin unknown
DBIx::Class::CDBICompat unknown
DBIx::Class::CDBICompat::Iterator unknown
DBIx::Class::CDBICompat::SQLTransformer unknown
DBIx::Class::CDBICompat::Tied::ColumnValue unknown
DBIx::Class::Core unknown
DBIx::Class::Cursor unknown
DBIx::Class::Cursor::Cached 1.001004
DBIx::Class::DB unknown
DBIx::Class::Exception unknown
DBIx::Class::FilterColumn unknown
DBIx::Class::InflateColumn unknown
DBIx::Class::InflateColumn::DateTime unknown
DBIx::Class::InflateColumn::File unknown
DBIx::Class::Optional::Dependencies unknown
DBIx::Class::Ordered unknown
DBIx::Class::PK unknown
DBIx::Class::PK::Auto unknown
DBIx::Class::Relationship unknown
DBIx::Class::Relationship::Base unknown
DBIx::Class::ResultClass::HashRefInflator unknown
DBIx::Class::ResultSet unknown
DBIx::Class::ResultSetColumn unknown
DBIx::Class::ResultSetManager unknown
DBIx::Class::ResultSource unknown
DBIx::Class::ResultSource::Table unknown
DBIx::Class::ResultSource::View unknown
DBIx::Class::ResultSourceHandle unknown
DBIx::Class::ResultSourceProxy::Table unknown
DBIx::Class::Row unknown
DBIx::Class::Schema unknown
DBIx::Class::Schema::Loader 0.07049
DBIx::Class::Schema::Loader::Base 0.07049
DBIx::Class::Schema::Loader::Column unknown
DBIx::Class::Schema::Loader::DBI 0.07049
DBIx::Class::Schema::Loader::DBI::ADO 0.07049
DBIx::Class::Schema::Loader::DBI::ADO::Microsoft_SQL_Server 0.07049
DBIx::Class::Schema::Loader::DBI::ADO::MS_Jet 0.07049
DBIx::Class::Schema::Loader::DBI::Component::QuotedDefault 0.07049
DBIx::Class::Schema::Loader::DBI::DB2 0.07049
DBIx::Class::Schema::Loader::DBI::Firebird 0.07049
DBIx::Class::Schema::Loader::DBI::Informix 0.07049
DBIx::Class::Schema::Loader::DBI::InterBase 0.07049
DBIx::Class::Schema::Loader::DBI::MSSQL 0.07049
DBIx::Class::Schema::Loader::DBI::mysql 0.07049
DBIx::Class::Schema::Loader::DBI::ODBC 0.07049
DBIx::Class::Schema::Loader::DBI::ODBC::ACCESS 0.07049
DBIx::Class::Schema::Loader::DBI::ODBC::Firebird 0.07049
DBIx::Class::Schema::Loader::DBI::ODBC::Microsoft_SQL_Server 0.07049
DBIx::Class::Schema::Loader::DBI::ODBC::SQL_Anywhere 0.07049
DBIx::Class::Schema::Loader::DBI::Oracle 0.07049
DBIx::Class::Schema::Loader::DBI::Pg 0.07049
DBIx::Class::Schema::Loader::DBI::SQLAnywhere 0.07049
DBIx::Class::Schema::Loader::DBI::SQLite 0.07049
DBIx::Class::Schema::Loader::DBI::Sybase 0.07049
DBIx::Class::Schema::Loader::DBI::Sybase::Common 0.07049
DBIx::Class::Schema::Loader::DBI::Sybase::Microsoft_SQL_Server 0.07049
DBIx::Class::Schema::Loader::DBI::Writing 0.07049
DBIx::Class::Schema::Loader::DBObject unknown
DBIx::Class::Schema::Loader::DBObject::Informix unknown
DBIx::Class::Schema::Loader::DBObject::Sybase unknown
DBIx::Class::Schema::Loader::Optional::Dependencies unknown
DBIx::Class::Schema::Loader::RelBuilder 0.07049
DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_040 0.07049
DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_05 0.07049
DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_06 0.07049
DBIx::Class::Schema::Loader::RelBuilder::Compat::v0_07 0.07049
DBIx::Class::Schema::Loader::Table unknown
DBIx::Class::Schema::Loader::Table::Informix unknown
DBIx::Class::Schema::Loader::Table::Sybase unknown
DBIx::Class::Serialize::Storable unknown
DBIx::Class::SQLMaker unknown
DBIx::Class::SQLMaker::LimitDialects unknown
DBIx::Class::SQLMaker::OracleJoins unknown
DBIx::Class::StartupCheck unknown
DBIx::Class::Storage unknown
DBIx::Class::Storage::DBI unknown
DBIx::Class::Storage::DBI::ACCESS unknown
DBIx::Class::Storage::DBI::ADO unknown
DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server unknown
DBIx::Class::Storage::DBI::ADO::Microsoft_SQL_Server::Cursor unknown
DBIx::Class::Storage::DBI::ADO::MS_Jet unknown
DBIx::Class::Storage::DBI::ADO::MS_Jet::Cursor unknown
DBIx::Class::Storage::DBI::AutoCast unknown
DBIx::Class::Storage::DBI::Cursor unknown
DBIx::Class::Storage::DBI::DB2 unknown
DBIx::Class::Storage::DBI::Firebird unknown
DBIx::Class::Storage::DBI::Firebird::Common unknown
DBIx::Class::Storage::DBI::IdentityInsert unknown
DBIx::Class::Storage::DBI::Informix unknown
DBIx::Class::Storage::DBI::InterBase unknown
DBIx::Class::Storage::DBI::MSSQL unknown
DBIx::Class::Storage::DBI::mysql unknown
DBIx::Class::Storage::DBI::NoBindVars unknown
DBIx::Class::Storage::DBI::ODBC unknown
DBIx::Class::Storage::DBI::ODBC::ACCESS unknown
DBIx::Class::Storage::DBI::ODBC::DB2_400_SQL unknown
DBIx::Class::Storage::DBI::ODBC::Firebird unknown
DBIx::Class::Storage::DBI::ODBC::Microsoft_SQL_Server unknown
DBIx::Class::Storage::DBI::ODBC::SQL_Anywhere unknown
DBIx::Class::Storage::DBI::Oracle unknown
DBIx::Class::Storage::DBI::Oracle::Generic unknown
DBIx::Class::Storage::DBI::Oracle::WhereJoins unknown
DBIx::Class::Storage::DBI::Pg unknown
DBIx::Class::Storage::DBI::Replicated unknown
DBIx::Class::Storage::DBI::Replicated::Balancer unknown
DBIx::Class::Storage::DBI::Replicated::Balancer::First unknown
DBIx::Class::Storage::DBI::Replicated::Balancer::Random unknown
DBIx::Class::Storage::DBI::Replicated::Pool unknown
DBIx::Class::Storage::DBI::Replicated::Replicant unknown
DBIx::Class::Storage::DBI::Replicated::WithDSN unknown
DBIx::Class::Storage::DBI::SQLAnywhere unknown
DBIx::Class::Storage::DBI::SQLAnywhere::Cursor unknown
DBIx::Class::Storage::DBI::SQLite unknown
DBIx::Class::Storage::DBI::Sybase unknown
DBIx::Class::Storage::DBI::Sybase::ASE unknown
DBIx::Class::Storage::DBI::Sybase::ASE::NoBindVars unknown
DBIx::Class::Storage::DBI::Sybase::FreeTDS unknown
DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server unknown
DBIx::Class::Storage::DBI::Sybase::Microsoft_SQL_Server::NoBindVars unknown
DBIx::Class::Storage::DBI::Sybase::MSSQL unknown
DBIx::Class::Storage::DBI::UniqueIdentifier unknown
DBIx::Class::Storage::Debug::PrettyPrint unknown
DBIx::Class::Storage::Statistics unknown
DBIx::Class::Storage::TxnScopeGuard unknown
DBIx::Class::UTF8Columns unknown
DBIx::Connector 0.56
DBIx::Connector::Driver 0.56
DBIx::Connector::Driver::Firebird 0.56
DBIx::Connector::Driver::MSSQL 0.56
DBIx::Connector::Driver::mysql 0.56
DBIx::Connector::Driver::Oracle 0.56
DBIx::Connector::Driver::Pg 0.56
DBIx::Connector::Driver::SQLite 0.56
DDP unknown
Declare::Constraints::Simple 0.03
Declare::Constraints::Simple::Library unknown
Declare::Constraints::Simple::Library::Array unknown
Declare::Constraints::Simple::Library::Base unknown
Declare::Constraints::Simple::Library::Exportable unknown
Declare::Constraints::Simple::Library::General unknown
Declare::Constraints::Simple::Library::Hash unknown
Declare::Constraints::Simple::Library::Numerical unknown
Declare::Constraints::Simple::Library::OO unknown
Declare::Constraints::Simple::Library::Operators unknown
Declare::Constraints::Simple::Library::Referencial unknown
Declare::Constraints::Simple::Library::Scalar unknown
Declare::Constraints::Simple::Result unknown
Devel::AssertC99 unknown
Devel::AssertOS 1.21
Devel::AssertOS::AIX 1.2
Devel::AssertOS::Amiga 1.2
Devel::AssertOS::Android 1.2
Devel::AssertOS::Apple 1.3
Devel::AssertOS::BeOS 1.4
Devel::AssertOS::Bitrig 1.0
Devel::AssertOS::BSDOS 1.2
Devel::AssertOS::Cygwin 1.3
Devel::AssertOS::DEC 1.4
Devel::AssertOS::DGUX 1.2
Devel::AssertOS::DragonflyBSD 1.2
Devel::AssertOS::Dynix 1.2
Devel::AssertOS::EBCDIC 1.0
Devel::AssertOS::FreeBSD 1.2
Devel::AssertOS::GNUkFreeBSD 1.1
Devel::AssertOS::Haiku 1.1
Devel::AssertOS::HPUX 1.2
Devel::AssertOS::Hurd 1.0
Devel::AssertOS::Interix 1.2
Devel::AssertOS::iOS 1.0
Devel::AssertOS::Irix 1.2
Devel::AssertOS::Linux 1.3
Devel::AssertOS::Linux::Debian 1.0
Devel::AssertOS::Linux::v2_6 1.3
Devel::AssertOS::MachTen 1.2
Devel::AssertOS::MacOSclassic 1.2
Devel::AssertOS::MacOSX 1.2
Devel::AssertOS::MacOSX::v10_0 1.0
Devel::AssertOS::MacOSX::v10_1 1.0
Devel::AssertOS::MacOSX::v10_10 1.0
Devel::AssertOS::MacOSX::v10_11 1.0
Devel::AssertOS::MacOSX::v10_12 1.0
Devel::AssertOS::MacOSX::v10_2 1.0
Devel::AssertOS::MacOSX::v10_3 1.0
Devel::AssertOS::MacOSX::v10_4 1.4
Devel::AssertOS::MacOSX::v10_5 1.0
Devel::AssertOS::MacOSX::v10_6 1.0
Devel::AssertOS::MacOSX::v10_7 1.0
Devel::AssertOS::MacOSX::v10_8 1.0
Devel::AssertOS::MacOSX::v10_9 1.0
Devel::AssertOS::MicrosoftWindows 1.3
Devel::AssertOS::MidnightBSD 1.1
Devel::AssertOS::Minix 1.0
Devel::AssertOS::MirOSBSD 1.2
Devel::AssertOS::MPEiX 1.2
Devel::AssertOS::MSDOS 1.2
Devel::AssertOS::MSWin32 1.3
Devel::AssertOS::NetBSD 1.2
Devel::AssertOS::Netware 1.2
Devel::AssertOS::NeXT 1.2
Devel::AssertOS::OpenBSD 1.2
Devel::AssertOS::OS2 1.1
Devel::AssertOS::OS390 1.2
Devel::AssertOS::OS400 1.2
Devel::AssertOS::OSF 1.2
Devel::AssertOS::OSFeatures::POSIXShellRedirection 1.4
Devel::AssertOS::POSIXBC 1.2
Devel::AssertOS::QNX 1.2
Devel::AssertOS::QNX::Neutrino 1.1
Devel::AssertOS::QNX::v4 1.1
Devel::AssertOS::Realtime 1.2
Devel::AssertOS::RISCOS 1.2
Devel::AssertOS::SCO 1.2
Devel::AssertOS::Solaris 1.2
Devel::AssertOS::Sun 1.3
Devel::AssertOS::SunOS 1.2
Devel::AssertOS::SysVr4 1.2
Devel::AssertOS::SysVr5 1.2
Devel::AssertOS::Unicos 1.2
Devel::AssertOS::Unix 1.6
Devel::AssertOS::VMESA 1.2
Devel::AssertOS::VMS 1.2
Devel::AssertOS::VOS 1.2
Devel::Caller 2.06
Devel::CheckBin 0.04
Devel::CheckCompiler 0.07
Devel::CheckLib 1.13
Devel::CheckOS 1.81
Devel::Confess 0.009004
Devel::Confess::_Util unknown
Devel::Confess::Builtin 0.009004
Devel::Confess::Source unknown
Devel::Cover 1.30
Devel::Cover::Annotation::Git 1.30
Devel::Cover::Annotation::Random 1.30
Devel::Cover::Annotation::Svk 1.30
Devel::Cover::Branch 1.30
Devel::Cover::Collection 1.30
Devel::Cover::Condition 1.30
Devel::Cover::Condition_and_2 1.30
Devel::Cover::Condition_and_3 1.30
Devel::Cover::Condition_or_2 1.30
Devel::Cover::Condition_or_3 1.30
Devel::Cover::Condition_xor_4 1.30
Devel::Cover::Criterion 1.30
Devel::Cover::DB 1.30
Devel::Cover::DB::Criterion 1.30
Devel::Cover::DB::Digests 1.30
Devel::Cover::DB::File 1.30
Devel::Cover::DB::IO 1.30
Devel::Cover::DB::IO::Base 1.30
Devel::Cover::DB::IO::JSON 1.30
Devel::Cover::DB::IO::Sereal 1.30
Devel::Cover::DB::IO::Storable 1.30
Devel::Cover::DB::Structure 1.30
Devel::Cover::Html_Common 1.30
Devel::Cover::Inc 1.30
Devel::Cover::Op 1.30
Devel::Cover::Pod 1.30
Devel::Cover::Report::Compilation 1.30
Devel::Cover::Report::Html 1.30
Devel::Cover::Report::Html_basic 1.30
Devel::Cover::Report::Html_minimal 1.30
Devel::Cover::Report::Html_subtle 1.30
Devel::Cover::Report::Json 1.30
Devel::Cover::Report::Sort 1.30
Devel::Cover::Report::Text 1.30
Devel::Cover::Report::Text2 1.30
Devel::Cover::Report::Vim 1.30
Devel::Cover::Statement 1.30
Devel::Cover::Subroutine 1.30
Devel::Cover::Test 1.30
Devel::Cover::Time 1.30
Devel::Cover::Util 1.30
Devel::Cover::Web 1.30
Devel::Cycle 1.12
Devel::Declare 0.006019
Devel::Declare::Context::Simple 0.006019
Devel::Declare::MethodInstaller::Simple 0.006019
Devel::Dwarn unknown
Devel::FindPerl 0.014
Devel::GlobalDestruction 0.14
Devel::GlobalPhase 0.003003
Devel::GraphVizProf 2.24
Devel::Hide 0.0010
Devel::InnerPackage 0.4
Devel::Leak 0.03
Devel::LexAlias 0.05
Devel::MAT 0.36
Devel::MAT::Context 0.36
Devel::MAT::Dumper 0.36
Devel::MAT::Dumpfile 0.36
Devel::MAT::Graph 0.36
Devel::MAT::InternalTools 0.36
Devel::MAT::SV 0.36
Devel::MAT::Tool 0.36
Devel::MAT::Tool::Callstack 0.36
Devel::MAT::Tool::Count 0.36
Devel::MAT::Tool::Find 0.36
Devel::MAT::Tool::Identify 0.36
Devel::MAT::Tool::Inrefs 0.36
Devel::MAT::Tool::IO 0.36
Devel::MAT::Tool::Outrefs 0.36
Devel::MAT::Tool::Reachability 0.36
Devel::MAT::Tool::Roots 0.36
Devel::MAT::Tool::Show 0.36
Devel::MAT::Tool::Sizes 0.36
Devel::MAT::Tool::Symbols 0.36
Devel::NYTProf 6.06
Devel::NYTProf::Apache 4.00
Devel::NYTProf::Constants unknown
Devel::NYTProf::Core 6.06
Devel::NYTProf::Data 4.02
Devel::NYTProf::FileHandle unknown
Devel::NYTProf::FileInfo unknown
Devel::NYTProf::Reader 4.06
Devel::NYTProf::ReadStream 4.00
Devel::NYTProf::Run unknown
Devel::NYTProf::SubCallInfo unknown
Devel::NYTProf::SubInfo unknown
Devel::NYTProf::Util 4.00
Devel::OverloadInfo 0.005
Devel::PartialDump 0.20
Devel::PatchPerl 1.52
Devel::PatchPerl::Hints 1.52
Devel::PatchPerl::Plugin 1.52
Devel::REPL 1.003028
Devel::REPL::Error 1.003028
Devel::REPL::Meta::Plugin 1.003028
Devel::REPL::Plugin 1.003028
Devel::REPL::Plugin::B::Concise 1.003028
Devel::REPL::Plugin::Carp::REPL 0.18
Devel::REPL::Plugin::Colors 1.003028
Devel::REPL::Plugin::Commands 1.003028
Devel::REPL::Plugin::Completion 1.003028
Devel::REPL::Plugin::CompletionDriver::Globals 1.003028
Devel::REPL::Plugin::CompletionDriver::INC 1.003028
Devel::REPL::Plugin::CompletionDriver::Keywords 1.003028
Devel::REPL::Plugin::CompletionDriver::LexEnv 1.003028
Devel::REPL::Plugin::CompletionDriver::Methods 1.003028
Devel::REPL::Plugin::CompletionDriver::Turtles 1.003028
Devel::REPL::Plugin::DDC 1.003028
Devel::REPL::Plugin::DDS 1.003028
Devel::REPL::Plugin::DumpHistory 1.003028
Devel::REPL::Plugin::FancyPrompt 1.003028
Devel::REPL::Plugin::FindVariable 1.003028
Devel::REPL::Plugin::History 1.003028
Devel::REPL::Plugin::Interrupt 1.003028
Devel::REPL::Plugin::LexEnv 1.003028
Devel::REPL::Plugin::MultiLine::PPI 1.003028
Devel::REPL::Plugin::Nopaste 1.003028
Devel::REPL::Plugin::OutputCache 1.003028
Devel::REPL::Plugin::Packages 1.003028
Devel::REPL::Plugin::Peek 1.003028
Devel::REPL::Plugin::PPI 1.003028
Devel::REPL::Plugin::ReadLineHistory 1.003028
Devel::REPL::Plugin::Refresh 1.003028
Devel::REPL::Plugin::Selenium 1.36
Devel::REPL::Plugin::ShowClass 1.003028
Devel::REPL::Plugin::Timing 1.003028
Devel::REPL::Plugin::Turtles 1.003028
Devel::REPL::Profile 1.003028
Devel::REPL::Profile::Default 1.003028
Devel::REPL::Profile::Minimal 1.003028
Devel::REPL::Profile::Standard 1.003028
Devel::REPL::Script 1.003028
Devel::StackTrace 2.03
Devel::StackTrace::AsHTML 0.15
Devel::StackTrace::Frame 2.03
Devel::StackTrace::WithLexicals 2.01
Devel::StackTrace::WithLexicals::Frame unknown
Devel::Symdump 2.18
Devel::Symdump::Export unknown
Devel::TypeTiny::Perl56Compat 1.002002
Devel::TypeTiny::Perl58Compat 1.002002
Digest::HMAC 1.03
Digest::HMAC_MD5 1.01
Digest::HMAC_SHA1 1.03
Digest::JHash 0.10
Digest::Perl::MD5 1.9
Digest::SHA1 2.13
Dist::CheckConflicts 0.11
Dist::Metadata 0.927
Dist::Metadata::Archive 0.927
Dist::Metadata::Dir 0.927
Dist::Metadata::Dist 0.927
Dist::Metadata::Struct 0.927
Dist::Metadata::Tar 0.927
Dist::Metadata::Zip 0.927
Dist::Zilla::PluginBundle::Example unknown
Email::Abstract 3.008
Email::Abstract::EmailMIME 3.008
Email::Abstract::EmailSimple 3.008
Email::Abstract::MailInternet 3.008
Email::Abstract::MailMessage 3.008
Email::Abstract::MIMEEntity 3.008
Email::Abstract::Plugin 3.008
Email::Address 1.909
Email::Address::XS 1.04
Email::Date::Format 1.005
Email::MessageID 1.406
Email::MIME 1.946
Email::MIME::ContentType 1.022
Email::MIME::Creator 1.946
Email::MIME::Encode 1.946
Email::MIME::Encodings 1.315
Email::MIME::Header 1.946
Email::MIME::Header::AddressList 1.946
Email::MIME::Kit 3.000006
Email::MIME::Kit::Assembler::Standard 3.000006
Email::MIME::Kit::KitReader::Dir 3.000006
Email::MIME::Kit::ManifestReader::JSON 3.000006
Email::MIME::Kit::ManifestReader::YAML 3.000006
Email::MIME::Kit::Renderer::TestRenderer 3.000006
Email::MIME::Kit::Role::Assembler 3.000006
Email::MIME::Kit::Role::Component 3.000006
Email::MIME::Kit::Role::KitReader 3.000006
Email::MIME::Kit::Role::ManifestDesugarer 3.000006
Email::MIME::Kit::Role::ManifestReader 3.000006
Email::MIME::Kit::Role::Renderer 3.000006
Email::MIME::Kit::Role::Validator 3.000006
Email::MIME::Modifier 1.946
Email::Sender 1.300031
Email::Sender::Failure 1.300031
Email::Sender::Failure::Multi 1.300031
Email::Sender::Failure::Permanent 1.300031
Email::Sender::Failure::Temporary 1.300031
Email::Sender::Manual 1.300031
Email::Sender::Manual::QuickStart 1.300031
Email::Sender::Role::CommonSending 1.300031
Email::Sender::Role::HasMessage 1.300031
Email::Sender::Simple 1.300031
Email::Sender::Success 1.300031
Email::Sender::Success::Partial 1.300031
Email::Sender::Transport 1.300031
Email::Sender::Transport::DevNull 1.300031
Email::Sender::Transport::Failable 1.300031
Email::Sender::Transport::Maildir 1.300031
Email::Sender::Transport::Mbox 1.300031
Email::Sender::Transport::Print 1.300031
Email::Sender::Transport::Sendmail 1.300031
Email::Sender::Transport::SMTP 1.300031
Email::Sender::Transport::SMTP::Persistent 1.300031
Email::Sender::Transport::Test 1.300031
Email::Sender::Transport::Wrapper 1.300031
Email::Sender::Util 1.300031
Email::Simple 2.216
Email::Simple::Creator 2.216
Email::Simple::Header 2.216
Email::Valid 1.202
Encode 2.98
Encode::Alias 2.24
Encode::Byte 2.04
Encode::CJKConstants 2.02
Encode::CN 2.03
Encode::CN::HZ 2.10
Encode::Config 2.05
Encode::ConfigLocal 1532079134
Encode::EBCDIC 2.02
Encode::Encoder 2.03
Encode::Encoding 2.08
Encode::GSM0338 2.07
Encode::Guess 2.07
Encode::JP 2.04
Encode::JP::H2Z 2.02
Encode::JP::JIS7 2.08
Encode::KR 2.03
Encode::KR::2022_KR 2.04
Encode::Locale 1.05
Encode::MIME::Header 2.28
Encode::MIME::Header::ISO_2022_JP 1.09
Encode::MIME::Name 1.03
Encode::Symbol 2.02
Encode::TW 2.03
Encode::Unicode 2.17
Encode::Unicode::UTF7 2.10
encoding 2.22
Env::Path 0.19
Error 0.17026
Error::Simple 0.17026
Error::TypeTiny 1.002002
Error::TypeTiny::Assertion 1.002002
Error::TypeTiny::Compilation 1.002002
Error::TypeTiny::WrongNumberOfParameters 1.002002
Eval::Closure 0.14
Eval::TypeTiny 1.002002
Eval::WithLexicals 1.003006
Eval::WithLexicals::WithHintPersistence 1.003006
Excel::Writer::XLSX 0.98
Excel::Writer::XLSX::Chart 0.98
Excel::Writer::XLSX::Chart::Area 0.98
Excel::Writer::XLSX::Chart::Bar 0.98
Excel::Writer::XLSX::Chart::Column 0.98
Excel::Writer::XLSX::Chart::Doughnut 0.98
Excel::Writer::XLSX::Chart::Line 0.98
Excel::Writer::XLSX::Chart::Pie 0.98
Excel::Writer::XLSX::Chart::Radar 0.98
Excel::Writer::XLSX::Chart::Scatter 0.98
Excel::Writer::XLSX::Chart::Stock 0.98
Excel::Writer::XLSX::Chartsheet 0.98
Excel::Writer::XLSX::Drawing 0.98
Excel::Writer::XLSX::Examples 0.98
Excel::Writer::XLSX::Format 0.98
Excel::Writer::XLSX::Package::App 0.98
Excel::Writer::XLSX::Package::Comments 0.98
Excel::Writer::XLSX::Package::ContentTypes 0.98
Excel::Writer::XLSX::Package::Core 0.98
Excel::Writer::XLSX::Package::Custom 0.98
Excel::Writer::XLSX::Package::Packager 0.98
Excel::Writer::XLSX::Package::Relationships 0.98
Excel::Writer::XLSX::Package::SharedStrings 0.98
Excel::Writer::XLSX::Package::Styles 0.98
Excel::Writer::XLSX::Package::Table 0.98
Excel::Writer::XLSX::Package::Theme 0.98
Excel::Writer::XLSX::Package::VML 0.98
Excel::Writer::XLSX::Package::XMLwriter 0.98
Excel::Writer::XLSX::Shape 0.98
Excel::Writer::XLSX::Utility 0.98
Excel::Writer::XLSX::Workbook 0.98
Excel::Writer::XLSX::Worksheet 0.98
Exception::Assertion 0.0504
Exception::Base 0.2501
Exception::Class 1.44
Exception::Class::Base 1.44
Expect 1.35
Expect::Simple 0.04
Exporter::Declare 0.114
Exporter::Declare::Export unknown
Exporter::Declare::Export::Alias unknown
Exporter::Declare::Export::Generator unknown
Exporter::Declare::Export::Sub unknown
Exporter::Declare::Export::Variable unknown
Exporter::Declare::Meta unknown
Exporter::Declare::Specs unknown
Exporter::Lite 0.08
Exporter::Shiny 1.002001
Exporter::Tidy 0.08
Exporter::Tiny 1.002001
ExtUtils::CChecker0.10
ExtUtils::Config0.008
ExtUtils::CppGuess0.12
ExtUtils::Depends0.405
ExtUtils::Helpers0.026
ExtUtils::Helpers::Unix0.026
ExtUtils::Helpers::VMS0.026
ExtUtils::Helpers::Windows0.026
ExtUtils::InstallPaths0.012
ExtUtils::MakeMaker::CPANfile0.08
ExtUtils::Manifest1.71
ExtUtils::PkgConfig1.16
FAliteunknown
FCGI0.78
FCGI::ProcManager0.28
FCGI::ProcManager::Constrainedunknown
Fennec::Lite0.004
Fh4.38
File::ChangeNotify0.28
File::ChangeNotify::Event0.28
File::ChangeNotify::Watcher0.28
File::ChangeNotify::Watcher::Default0.28
File::ChangeNotify::Watcher::Inotify0.28
File::ChangeNotify::Watcher::KQueue0.28
File::Copy::Link0.06
File::Copy::Recursive0.44
File::Copy::Recursive::Reduced0.006
File::Find::Rule0.34
File::Grep0.02
File::HomeDir1.004
File::HomeDir::Darwin1.004
File::HomeDir::Darwin::Carbon1.004
File::HomeDir::Darwin::Cocoa1.004
File::HomeDir::Driver1.004
File::HomeDir::FreeDesktop1.004
File::HomeDir::MacOS91.004
File::HomeDir::Test1.004
File::HomeDir::Unix1.004
File::HomeDir::Windows1.004
File::Listing6.04
File::Map0.65
File::Next1.16
File::NFSLock1.27
File::Path2.15
File::pushd1.016
File::Remove1.57
File::Share0.25
File::ShareDir1.116
File::ShareDir::Install0.13
File::Slurp9999.19
File::Slurp::Tiny0.004
File::Slurper0.012
File::Spec3.74
File::Spec::AmigaOS3.74
File::Spec::Cygwin3.74
File::Spec::Epoc3.74
File::Spec::Functions3.74
File::Spec::Link0.073
File::Spec::Mac3.74
File::Spec::Native1.004
File::Spec::OS23.74
File::Spec::Unix3.74
File::Spec::VMS3.74
File::Spec::Win323.74
File::Temp0.2308
File::Which1.22
Filesys::Notify::Simple0.13
Font::TTF1.06
Font::TTF::AATKernunknown
Font::TTF::AATutilsunknown
Font::TTF::Anchorunknown
Font::TTF::Bslnunknown
Font::TTF::Cmapunknown
Font::TTF::Coverageunknown
Font::TTF::Cvt_0.0001
Font::TTF::Deltaunknown
Font::TTF::DSIGunknown
Font::TTF::Dumperunknown
Font::TTF::EBDTunknown
Font::TTF::EBLCunknown
Font::TTF::Fdscunknown
Font::TTF::Featunknown
Font::TTF::Features::Cvarunknown
Font::TTF::Features::Sizeunknown
Font::TTF::Features::Ssetunknown
Font::TTF::Fmtxunknown
Font::TTF::Font0.39
Font::TTF::Fpgm0.0001
Font::TTF::GDEFunknown
Font::TTF::Glatunknown
Font::TTF::Glocunknown
Font::TTF::Glyfunknown
Font::TTF::Glyphunknown
Font::TTF::GPOSunknown
Font::TTF::GrFeatunknown
Font::TTF::GSUBunknown
Font::TTF::Hdmxunknown
Font::TTF::Headunknown
Font::TTF::Hheaunknown
Font::TTF::Hmtxunknown
Font::TTF::Kernunknown
Font::TTF::Kern::ClassArrayunknown
Font::TTF::Kern::CompactClassArrayunknown
Font::TTF::Kern::OrderedListunknown
Font::TTF::Kern::StateTableunknown
Font::TTF::Kern::Subtableunknown
Font::TTF::Locaunknown
Font::TTF::LTSHunknown
Font::TTF::Maxpunknown
Font::TTF::Mortunknown
Font::TTF::Mort::Chainunknown
Font::TTF::Mort::Contextualunknown
Font::TTF::Mort::Insertionunknown
Font::TTF::Mort::Ligatureunknown
Font::TTF::Mort::Noncontextualunknown
Font::TTF::Mort::Rearrangementunknown
Font::TTF::Mort::Subtableunknown
Font::TTF::Name1.1
Font::TTF::OldCmapunknown
Font::TTF::OldMortunknown
Font::TTF::OS_2unknown
Font::TTF::OTTagsunknown
Font::TTF::PCLTunknown
Font::TTF::Post0.01
Font::TTF::Prep0.0001
Font::TTF::Propunknown
Font::TTF::PSNamesunknown
Font::TTF::Segarr0.0001
Font::TTF::Silfunknown
Font::TTF::Sillunknown
Font::TTF::Table0.0001
Font::TTF::Ttc0.0001
Font::TTF::Ttopenunknown
Font::TTF::Utils0.0001
Font::TTF::Vheaunknown
Font::TTF::Vmtxunknown
Font::TTF::Win32unknown
Font::TTF::Woffunknown
Font::TTF::Woff::MetaDataunknown
Font::TTF::Woff::PrivateDataunknown
Font::TTF::XMLparseunknown
forks0.36
forks::shared0.36
Future0.38
Future::Mutex0.38
Future::Utils0.38
GD2.68
GD::Graph1.54
GD::Graph::areaunknown
GD::Graph::axestypeunknown
GD::Graph::barsunknown
GD::Graph::colourunknown
GD::Graph::Dataunknown
GD::Graph::Errorunknown
GD::Graph::hbarsunknown
GD::Graph::linesunknown
GD::Graph::linespointsunknown
GD::Graph::mixedunknown
GD::Graph::pieunknown
GD::Graph::pointsunknown
GD::Graph::utilsunknown
GD::Group1
GD::Image2.67
GD::Polygonunknown
GD::Polyline0.2
GD::Simpleunknown
GD::SVG0.33
GD::Text0.86
GD::Text::Alignunknown
GD::Text::Wrapunknown
Getopt::Long::Descriptive0.102
Getopt::Long::Descriptive::Opts0.102
Getopt::Long::Descriptive::Usage0.102
Graph0.9704
Graph::AdjacencyMapunknown
Graph::AdjacencyMap::Heavyunknown
Graph::AdjacencyMap::Lightunknown
Graph::AdjacencyMap::Vertexunknown
Graph::AdjacencyMatrixunknown
Graph::Attributeunknown
Graph::BitMatrixunknown
Graph::Directedunknown
Graph::Matrixunknown
Graph::MSTHeapElemunknown
Graph::Reader2.09
Graph::Reader::Dot2.09
Graph::Reader::HTK2.09
Graph::Reader::XML2.09
Graph::ReadWrite2.09
Graph::SPTHeapElemunknown
Graph::TransitiveClosureunknown
Graph::TransitiveClosure::Matrixunknown
Graph::Traversalunknown
Graph::Traversal::BFSunknown
Graph::Traversal::DFSunknown
Graph::Undirectedunknown
Graph::UnionFindunknown
Graph::Writer2.09
Graph::Writer::daVinci2.09
Graph::Writer::Dot2.09
Graph::Writer::HTK2.09
Graph::Writer::VCG2.09
Graph::Writer::XML2.09
GraphViz2.24
GraphViz::Data::Grapher2.24
GraphViz::No2.24
GraphViz::Parse::RecDescent2.24
GraphViz::Parse::Yacc2.24
GraphViz::Parse::Yapp2.24
GraphViz::Regex2.24
GraphViz::Small2.24
GraphViz::XML2.24
Hash::AutoHash1.17
Hash::AutoHash::Args1.18
Hash::AutoHash::Args::V01.18
Hash::Merge0.300
Hash::Merge::Simple0.051
Hash::MoreUtils0.06
Hash::MultiValue0.16
Hash::Util::FieldHash::Compat0.11
Hash::Util::FieldHash::Compat::Heavy0.11
Heap071::Elemunknown
Heap071::Fibonacciunknown
Hook::LexWrap0.26
HPC::Runner2.48
HPC::Runner::Scheduler0.09
HPC::Runner::Slurm2.58
HTML::AsSubs5.07
HTML::Element5.07
HTML::Element::traverse5.07
HTML::Entities3.69
HTML::Filter3.72
HTML::Form6.03
HTML::FormHandler0.40068
HTML::FormHandler::Base0.40068
HTML::FormHandler::Blocks0.40068
HTML::FormHandler::BuildFields0.40068
HTML::FormHandler::BuildPages0.40068
HTML::FormHandler::Field0.40068
HTML::FormHandler::Field::AddElement0.40068
HTML::FormHandler::Field::Boolean0.40068
HTML::FormHandler::Field::BoolSelect0.40068
HTML::FormHandler::Field::Button0.40068
HTML::FormHandler::Field::Captcha0.40068
HTML::FormHandler::Field::Checkbox0.40068
HTML::FormHandler::Field::Compound0.40068
HTML::FormHandler::Field::Date0.40068
HTML::FormHandler::Field::DateMDY0.40068
HTML::FormHandler::Field::DateTime0.40068
HTML::FormHandler::Field::Display0.40068
HTML::FormHandler::Field::Duration0.40068
HTML::FormHandler::Field::Email0.40068
HTML::FormHandler::Field::File0.40068
HTML::FormHandler::Field::Float0.40068
HTML::FormHandler::Field::Hidden0.40068
HTML::FormHandler::Field::Hour0.40068
HTML::FormHandler::Field::Integer0.40068
HTML::FormHandler::Field::IntRange0.40068
HTML::FormHandler::Field::Minute0.40068
HTML::FormHandler::Field::Money0.40068
HTML::FormHandler::Field::Month0.40068
HTML::FormHandler::Field::MonthDay0.40068
HTML::FormHandler::Field::MonthName0.40068
HTML::FormHandler::Field::Multiple0.40068
HTML::FormHandler::Field::Nested0.40068
HTML::FormHandler::Field::NonEditable0.40068
HTML::FormHandler::Field::NoValue0.40068
HTML::FormHandler::Field::Password0.40068
HTML::FormHandler::Field::PasswordConf0.40068
HTML::FormHandler::Field::PosInteger0.40068
HTML::FormHandler::Field::PrimaryKey0.40068
HTML::FormHandler::Field::Repeatable0.40068
HTML::FormHandler::Field::RequestToken0.40068
HTML::FormHandler::Field::Reset0.40068
HTML::FormHandler::Field::Result0.40068
HTML::FormHandler::Field::RmElement0.40068
HTML::FormHandler::Field::Role::RequestToken0.40068
HTML::FormHandler::Field::Second0.40068
HTML::FormHandler::Field::Select0.40068
HTML::FormHandler::Field::SelectCSV0.40068
HTML::FormHandler::Field::Submit0.40068
HTML::FormHandler::Field::Text0.40068
HTML::FormHandler::Field::TextArea0.40068
HTML::FormHandler::Field::TextCSV0.40068
HTML::FormHandler::Field::Upload0.40068
HTML::FormHandler::Field::Weekday0.40068
HTML::FormHandler::Field::Year0.40068
HTML::FormHandler::Fields0.40068
HTML::FormHandler::Foo0.40068
HTML::FormHandler::I18N0.40068
HTML::FormHandler::I18N::ar_kw0.40068
HTML::FormHandler::I18N::bg_bg0.40068
HTML::FormHandler::I18N::ca_es0.40068
HTML::FormHandler::I18N::cs_cz0.40068
HTML::FormHandler::I18N::de_de0.40068
HTML::FormHandler::I18N::en_us0.40068
HTML::FormHandler::I18N::es_es0.40068
HTML::FormHandler::I18N::hu_hu0.40068
HTML::FormHandler::I18N::it_it0.40068
HTML::FormHandler::I18N::ja_jp0.40068
HTML::FormHandler::I18N::pt_br0.40068
HTML::FormHandler::I18N::ru_ru0.40068
HTML::FormHandler::I18N::sv_se0.40068
HTML::FormHandler::I18N::tr_tr0.40068
HTML::FormHandler::I18N::ua_ua0.40068
HTML::FormHandler::InitResult0.40068
HTML::FormHandler::Merge0.40068
HTML::FormHandler::Model0.40068
HTML::FormHandler::Model::Object0.40068
HTML::FormHandler::Moose0.40068
HTML::FormHandler::Moose::Role0.40068
HTML::FormHandler::Page0.40068
HTML::FormHandler::Page::Simple0.40068
HTML::FormHandler::Pages0.40068
HTML::FormHandler::Render::RepeatableJs0.40068
HTML::FormHandler::Render::Simple0.40068
HTML::FormHandler::Render::Table0.40068
HTML::FormHandler::Render::Util0.40068
HTML::FormHandler::Render::WithTT0.40068
HTML::FormHandler::Result0.40068
HTML::FormHandler::Result::Role0.40068
HTML::FormHandler::Test0.40068
HTML::FormHandler::TraitFor::Captcha0.40068
HTML::FormHandler::TraitFor::I18N0.40068
HTML::FormHandler::TraitFor::Types0.40068
HTML::FormHandler::Traits0.40068
HTML::FormHandler::Types0.40068
HTML::FormHandler::Validate0.40068
HTML::FormHandler::Widget::ApplyRole0.40068
HTML::FormHandler::Widget::Block0.40068
HTML::FormHandler::Widget::Block::Bootstrap0.40068
HTML::FormHandler::Widget::Field::Button0.40068
HTML::FormHandler::Widget::Field::ButtonTag0.40068
HTML::FormHandler::Widget::Field::Captcha0.40068
HTML::FormHandler::Widget::Field::Checkbox0.40068
HTML::FormHandler::Widget::Field::CheckboxGroup0.40068
HTML::FormHandler::Widget::Field::Compound0.40068
HTML::FormHandler::Widget::Field::Hidden0.40068
HTML::FormHandler::Widget::Field::HorizCheckboxGroup0.40068
HTML::FormHandler::Widget::Field::NoRender0.40068
HTML::FormHandler::Widget::Field::Password0.40068
HTML::FormHandler::Widget::Field::RadioGroup0.40068
HTML::FormHandler::Widget::Field::Repeatable0.40068
HTML::FormHandler::Widget::Field::Reset0.40068
HTML::FormHandler::Widget::Field::Role::HTMLAttributes0.40068
HTML::FormHandler::Widget::Field::Role::SelectedOption0.40068
HTML::FormHandler::Widget::Field::Select0.40068
HTML::FormHandler::Widget::Field::Span0.40068
HTML::FormHandler::Widget::Field::Submit0.40068
HTML::FormHandler::Widget::Field::Text0.40068
HTML::FormHandler::Widget::Field::Textarea0.40068
HTML::FormHandler::Widget::Field::Upload0.40068
HTML::FormHandler::Widget::Form::Role::HTMLAttributes0.40068
HTML::FormHandler::Widget::Form::Simple0.40068
HTML::FormHandler::Widget::Form::Table0.40068
HTML::FormHandler::Widget::Theme::Bootstrap0.40068
HTML::FormHandler::Widget::Theme::Bootstrap30.40068
HTML::FormHandler::Widget::Theme::BootstrapFormMessages0.40068
HTML::FormHandler::Widget::Wrapper::Base0.40068
HTML::FormHandler::Widget::Wrapper::Bootstrap0.40068
HTML::FormHandler::Widget::Wrapper::Bootstrap30.40068
HTML::FormHandler::Widget::Wrapper::Fieldset0.40068
HTML::FormHandler::Widget::Wrapper::None0.40068
HTML::FormHandler::Widget::Wrapper::Simple0.40068
HTML::FormHandler::Widget::Wrapper::SimpleInline0.40068
HTML::FormHandler::Widget::Wrapper::Table0.40068
HTML::FormHandler::Widget::Wrapper::TableInline0.40068
HTML::FormHandler::Wizard0.40068
HTML::HeadParser3.71
HTML::LinkExtor3.69
HTML::Parse5.07
HTML::Parser3.72
HTML::Perlinfo1.69
HTML::Perlinfo::Apacheunknown
HTML::Perlinfo::Baseunknown
HTML::Perlinfo::Commonunknown
HTML::Perlinfo::Generalunknown
HTML::Perlinfo::Loaded1.02
HTML::Perlinfo::Modules1.19
HTML::PullParser3.57
HTML::TableExtract2.15
HTML::Tagset3.20
HTML::TokeParser3.69
HTML::Tree5.07
HTML::TreeBuilder5.07
HTTP::Body1.22
HTTP::Body::MultiPart1.22
HTTP::Body::OctetStream1.22
HTTP::Body::UrlEncoded1.22
HTTP::Body::XForms1.22
HTTP::Body::XFormsMultipart1.22
HTTP::Config6.18
HTTP::CookieJar0.008
HTTP::CookieJar::LWP0.008
HTTP::Cookies6.04
HTTP::Cookies::Microsoft6.04
HTTP::Cookies::Netscape6.04
HTTP::Daemon6.01
HTTP::Date6.02
HTTP::Entity::Parser0.21
HTTP::Entity::Parser::JSONunknown
HTTP::Entity::Parser::MultiPartunknown
HTTP::Entity::Parser::OctetStreamunknown
HTTP::Entity::Parser::UrlEncodedunknown
HTTP::Headers6.18
HTTP::Headers::Auth6.18
HTTP::Headers::ETag6.18
HTTP::Headers::Fast0.21
HTTP::Headers::Util6.18
HTTP::Message6.18
HTTP::Message::PSGIunknown
HTTP::MultiPartParser0.02
HTTP::Negotiate6.01
HTTP::Parser::XS0.17
HTTP::Parser::XS::PPunknown
HTTP::Request6.18
HTTP::Request::AsCGI1.2
HTTP::Request::Common6.18
HTTP::Response6.18
HTTP::Server::PSGIunknown
HTTP::Server::PSGI::Net::Server::PreForkunknown
HTTP::Server::Simple0.52
HTTP::Server::Simple::CGIunknown
HTTP::Server::Simple::CGI::Environmentunknown
HTTP::Server::Simple::PSGI0.16
HTTP::Status6.18
HTTP::Thin0.006
HTTP::Tinyish0.14
HTTP::Tinyish::Baseunknown
HTTP::Tinyish::Curlunknown
HTTP::Tinyish::HTTPTinyunknown
HTTP::Tinyish::LWPunknown
HTTP::Tinyish::Wgetunknown
HTTP::XSCookies0.000021
Image::PNG0.23
Image::PNG::Const0.45
Image::PNG::Container0.23
Image::PNG::Libpng0.45
Image::PNG::Utilunknown
Import::Into1.002005
Importer0.025
inc::Module::Install1.19
inc::Module::Install::DSL1.19
indirect0.38
Inline0.80
Inline::C0.78
Inline::C::Parserunknown
Inline::C::Parser::Pegexunknown
Inline::C::Parser::Pegex::ASTunknown
Inline::C::Parser::Pegex::Grammarunknown
Inline::C::Parser::RecDescentunknown
Inline::C::Parser::RegExpunknown
Inline::denterunknown
Inline::Foo0.80
Inline::MakeMaker0.80
Inline::MakePdlppInstallableunknown
Inline::Pdlpp0.4
IO::All0.87
IO::All::Baseunknown
IO::All::DBMunknown
IO::All::Dirunknown
IO::All::Fileunknown
IO::All::Filesysunknown
IO::All::Linkunknown
IO::All::MLDBMunknown
IO::All::Pipeunknown
IO::All::Socketunknown
IO::All::STDIOunknown
IO::All::Stringunknown
IO::All::Tempunknown
IO::Async0.72
IO::Async::Channel0.72
IO::Async::Debug0.72
IO::Async::File0.72
IO::Async::FileStream0.72
IO::Async::Function0.72
IO::Async::Future0.72
IO::Async::Handle0.72
IO::Async::Internals::ChildManager0.72
IO::Async::Listener0.72
IO::Async::Loop0.72
IO::Async::Loop::Poll0.72
IO::Async::Loop::Select0.72
IO::Async::LoopTests0.72
IO::Async::Notifier0.72
IO::Async::OS0.72
IO::Async::OS::cygwin0.72
IO::Async::OS::linux0.72
IO::Async::OS::MSWin320.72
IO::Async::PID0.72
IO::Async::Process0.72
IO::Async::Protocol0.72
IO::Async::Protocol::LineStream0.72
IO::Async::Protocol::Stream0.72
IO::Async::Resolver0.72
IO::Async::Routine0.72
IO::Async::Signal0.72
IO::Async::Socket0.72
IO::Async::Stream0.72
IO::Async::Test0.72
IO::Async::Timer0.72
IO::Async::Timer::Absolute0.72
IO::Async::Timer::Countdown0.72
IO::Async::Timer::Periodic0.72
IO::AtomicFile2.111
IO::CaptureOutput1.1104
IO::HTML1.001
IO::InnerFile2.111
IO::Interactive1.022
IO::Lines2.111
IO::Pipely0.005
IO::Prompt0.997004
IO::Pty1.12
IO::Scalar2.111
IO::ScalarArray2.111
IO::SessionData1.03
IO::SessionSetunknown
IO::Socket::SSL2.058
IO::Socket::SSL::Intercept2.056
IO::Socket::SSL::PublicSuffixunknown
IO::Socket::SSL::Utils2.014
IO::Socket::Timeout0.32
IO::String1.08
IO::Stringy2.111
IO::TieCombine1.005
IO::TieCombine::Handle1.005
IO::TieCombine::Scalar1.005
IO::Tty1.12
IO::Tty::Constantunknown
IO::Wrap2.111
IO::WrapTie2.111
IPC::Run20180523.0
IPC::Run30.048
IPC::Run3::ProfArrayBuffer0.048
IPC::Run3::ProfLogger0.048
IPC::Run3::ProfLogReader0.048
IPC::Run3::ProfPP0.048
IPC::Run3::ProfReporter0.048
IPC::Run::Debug20180523.0
IPC::Run::IO20180523.0
IPC::Run::Timer20180523.0
IPC::Run::Win32Helper20180523.0
IPC::Run::Win32IO20180523.0
IPC::Run::Win32Pump20180523.0
IPC::ShareLite0.17
IPC::System::Simple1.25
JSON2.97001
JSON::Any1.39
JSON::MaybeXS1.004000
Lexical::Persistence1.020
lib::core::onlyunknown
Lingua::EN::FindNumber1.32
Lingua::EN::Inflect1.903
Lingua::EN::Inflect::Number1.12
Lingua::EN::Inflect::Phrase0.20
Lingua::EN::Number::IsOrdinal0.05
Lingua::EN::Tagger0.29
Lingua::EN::Words2Numsunknown
Lingua::GL::Stemmer0.02
Lingua::PT::Stemmer0.02
Lingua::Stem0.84
Lingua::Stem::AutoLoader1.02
Lingua::Stem::Da1.01
Lingua::Stem::De1.01
Lingua::Stem::En2.16
Lingua::Stem::EnBroken2.13
Lingua::Stem::Fr0.02
Lingua::Stem::Gl1.02
Lingua::Stem::It0.02
Lingua::Stem::No1.01
Lingua::Stem::Pt1.01
Lingua::Stem::Ru0.04
Lingua::Stem::Snowball::Da1.01
Lingua::Stem::Snowball::No1.2
Lingua::Stem::Snowball::Se1.2
Lingua::Stem::Sv1.01
List::AllUtils0.14
List::MoreUtils0.428
List::MoreUtils::PP0.428
List::MoreUtils::XS0.428
List::SomeUtils0.56
List::SomeUtils::PP0.56
List::SomeUtils::XS0.58
List::Util1.50
List::Util::XS1.50
List::UtilsBy0.11
local::lib2.000024
Locale::Maketext::Extract1.00
Locale::Maketext::Extract::Plugin::Base1.00
Locale::Maketext::Extract::Plugin::FormFu1.00
Locale::Maketext::Extract::Plugin::Generic1.00
Locale::Maketext::Extract::Plugin::Haml1.00
Locale::Maketext::Extract::Plugin::Mason1.00
Locale::Maketext::Extract::Plugin::Perl1.00
Locale::Maketext::Extract::Plugin::PPI1.00
Locale::Maketext::Extract::Plugin::TextTemplate1.00
Locale::Maketext::Extract::Plugin::TT21.00
Locale::Maketext::Extract::Plugin::YAML1.00
Locale::Maketext::Extract::Run1.00
Locale::Maketext::Lexicon1.00
Locale::Maketext::Lexicon::Auto1.00
Locale::Maketext::Lexicon::Gettext1.00
Locale::Maketext::Lexicon::Msgcat1.00
Locale::Maketext::Lexicon::Tie1.00
Log::Any1.706
Log::Any::Adapter1.706
Log::Any::Adapter::Base1.706
Log::Any::Adapter::File1.706
Log::Any::Adapter::Null1.706
Log::Any::Adapter::Stderr1.706
Log::Any::Adapter::Stdout1.706
Log::Any::Adapter::Syslog1.706
Log::Any::Adapter::Test1.706
Log::Any::Adapter::Util1.706
Log::Any::Manager1.706
Log::Any::Proxy1.706
Log::Any::Proxy::Null1.706
Log::Any::Proxy::Test1.706
Log::Any::Test1.706
Log::Contextual0.008001
Log::Contextual::Easy::Default0.008001
Log::Contextual::Easy::Package0.008001
Log::Contextual::Role::Router0.008001
Log::Contextual::Role::Router::HasLogger0.008001
Log::Contextual::Role::Router::SetLogger0.008001
Log::Contextual::Role::Router::WithLogger0.008001
Log::Contextual::Router0.008001
Log::Contextual::SimpleLogger0.008001
Log::Contextual::TeeLogger0.008001
Log::Contextual::WarnLogger0.008001
Log::Dispatch2.67
Log::Dispatch::ApacheLog2.67
Log::Dispatch::Array1.003
Log::Dispatch::Base2.67
Log::Dispatch::Code2.67
Log::Dispatch::Config1.04
Log::Dispatch::Configurator1.00
Log::Dispatch::Configurator::AppConfig1.00
Log::Dispatch::Email2.67
Log::Dispatch::Email::MailSend2.67
Log::Dispatch::Email::MailSender2.67
Log::Dispatch::Email::MailSendmail2.67
Log::Dispatch::Email::MIMELite2.67
Log::Dispatch::File2.67
Log::Dispatch::File::Locked2.67
Log::Dispatch::Handle2.67
Log::Dispatch::Null2.67
Log::Dispatch::Output2.67
Log::Dispatch::Screen2.67
Log::Dispatch::Syslog2.67
Log::Dispatch::Types2.67
Log::Dispatch::Vars2.67
Log::Dispatchouli2.016
Log::Dispatchouli::Global2.016
Log::Dispatchouli::Proxy2.016
Log::Log4perl1.49
Log::Log4perl::Appenderunknown
Log::Log4perl::Appender::Bufferunknown
Log::Log4perl::Appender::DBIunknown
Log::Log4perl::Appender::Fileunknown
Log::Log4perl::Appender::Limitunknown
Log::Log4perl::Appender::RRDsunknown
Log::Log4perl::Appender::Screenunknown
Log::Log4perl::Appender::ScreenColoredLevelsunknown
Log::Log4perl::Appender::Socketunknown
Log::Log4perl::Appender::Stringunknown
Log::Log4perl::Appender::Synchronizedunknown
Log::Log4perl::Appender::TestArrayBufferunknown
Log::Log4perl::Appender::TestBufferunknown
Log::Log4perl::Appender::TestFileCreeperunknown
Log::Log4perl::Catalystunknown
Log::Log4perl::Configunknown
Log::Log4perl::Config::BaseConfiguratorunknown
Log::Log4perl::Config::DOMConfigurator0.03
Log::Log4perl::Config::PropertyConfiguratorunknown
Log::Log4perl::Config::Watchunknown
Log::Log4perl::DateFormatunknown
Log::Log4perl::Filterunknown
Log::Log4perl::Filter::Booleanunknown
Log::Log4perl::Filter::LevelMatchunknown
Log::Log4perl::Filter::LevelRangeunknown
Log::Log4perl::Filter::MDCunknown
Log::Log4perl::Filter::StringMatchunknown
Log::Log4perl::InternalDebugunknown
Log::Log4perl::JavaMapunknown
Log::Log4perl::JavaMap::ConsoleAppenderunknown
Log::Log4perl::JavaMap::FileAppenderunknown
Log::Log4perl::JavaMap::JDBCAppenderunknown
Log::Log4perl::JavaMap::NTEventLogAppenderunknown
Log::Log4perl::JavaMap::RollingFileAppenderunknown
Log::Log4perl::JavaMap::SyslogAppenderunknown
Log::Log4perl::JavaMap::TestBufferunknown
Log::Log4perl::Layoutunknown
Log::Log4perl::Layout::NoopLayoutunknown
Log::Log4perl::Layout::PatternLayoutunknown
Log::Log4perl::Layout::PatternLayout::Multilineunknown
Log::Log4perl::Layout::SimpleLayoutunknown
Log::Log4perl::Levelunknown
Log::Log4perl::Loggerunknown
Log::Log4perl::MDCunknown
Log::Log4perl::NDCunknown
Log::Log4perl::Resurrectorunknown
Log::Log4perl::Utilunknown
Log::Log4perl::Util::Semaphoreunknown
Log::Log4perl::Util::TimeTrackerunknown
Log::Message0.08
Log::Message::Config0.08
Log::Message::Handlers0.08
Log::Message::Item0.08
Log::Message::Simple0.10
Log::Report1.27
Log::Report::DBIC::Profiler1.27
Log::Report::Die1.27
Log::Report::Dispatcher1.27
Log::Report::Dispatcher::Callback1.27
Log::Report::Dispatcher::File1.27
Log::Report::Dispatcher::Log4perl1.27
Log::Report::Dispatcher::LogDispatch1.27
Log::Report::Dispatcher::Perl1.27
Log::Report::Dispatcher::Syslog1.27
Log::Report::Dispatcher::Try1.27
Log::Report::Domain1.27
Log::Report::Exception1.27
Log::Report::Message1.27
Log::Report::Minimal1.06
Log::Report::Minimal::Domain1.06
Log::Report::Optional1.06
Log::Report::Translator1.27
Log::Report::Util1.06
Logger::Simple2.0
LWP6.35
LWP::Authen::Basic6.35
LWP::Authen::Digest6.35
LWP::Authen::Ntlm6.35
LWP::ConnCache6.35
LWP::Debug6.35
LWP::Debugunknown
LWP::Debug::TraceHTTP6.35
LWP::DebugFile6.35
LWP::MediaTypes6.02
LWP::MemberMixin6.35
LWP::Protocol6.35
LWP::Protocol::cpan6.35
LWP::Protocol::data6.35
LWP::Protocol::file6.35
LWP::Protocol::ftp6.35
LWP::Protocol::gopher6.35
LWP::Protocol::http6.35
LWP::Protocol::https6.07
LWP::Protocol::loopback6.35
LWP::Protocol::mailto6.35
LWP::Protocol::nntp6.35
LWP::Protocol::nogo6.35
LWP::RobotUA6.35
LWP::Simple6.35
LWP::UserAgent6.35
Mail::Address2.20
Mail::Cap2.20
Mail::Field2.20
Mail::Field::AddrList2.20
Mail::Field::Date2.20
Mail::Field::Generic2.20
Mail::Filter2.20
Mail::Header2.20
Mail::Internet2.20
Mail::Mailer2.20
Mail::Mailer::qmail2.20
Mail::Mailer::rfc8222.20
Mail::Mailer::sendmail2.20
Mail::Mailer::smtp2.20
Mail::Mailer::smtps2.20
Mail::Mailer::testfile2.20
Mail::Send2.20
Mail::Util2.20
MailTools2.20
Math::Bezier0.01
Math::BigFloat1.999813
Math::BigInt1.999813
Math::BigInt::Calc1.999813
Math::BigInt::CalcEmu1.999813
Math::BigInt::Lib1.999813
Math::CDF0.1
Math::Cephes0.5305
Math::Cephes::Complex0.5305
Math::Cephes::Fraction0.5305
Math::Cephes::Matrix0.5305
Math::Cephes::Polynomial0.5305
Math::Combinatorics0.09
Math::Counting0.1305
Math::Derivative1.01
Math::GSL::Linalg::SVD0.0.2
Math::MatrixReal2.13
Math::Prime::Util0.70
Math::Prime::Util::ChaCha0.70
Math::Prime::Util::ECAffinePoint0.70
Math::Prime::Util::ECProjectivePoint0.70
Math::Prime::Util::Entropy0.70
Math::Prime::Util::GMP0.50
Math::Prime::Util::MemFree0.70
Math::Prime::Util::PP0.70
Math::Prime::Util::PPFEunknown
Math::Prime::Util::PrimalityProving0.70
Math::Prime::Util::PrimeArray0.70
Math::Prime::Util::PrimeIterator0.70
Math::Prime::Util::RandomPrimes0.70
Math::Prime::Util::ZetaBigFloat0.70
Math::Random0.72
Math::Random::ISAAC1.004
Math::Random::ISAAC::PP1.004
Math::Random::MT::Auto6.22
Math::Random::MT::Auto::Range6.22
Math::Round0.07
Math::Spline0.02
Math::Utils1.12
Math::VecStat0.08
Memoize::ExpireLRU0.56
Menlo1.9019
Menlo::Builder::Staticunknown
Menlo::CLI::Compat1.9022
Menlo::Dependencyunknown
Menlo::Index::MetaCPANunknown
Menlo::Index::MetaDB1.9019
Menlo::Index::Mirrorunknown
Menlo::Legacy1.9022
Menlo::Utilunknown
Meta::Builder0.003
Meta::Builder::Baseunknown
Meta::Builder::Utilunknown
metaclass2.2011
Method::Generate::Accessorunknown
Method::Generate::BuildAllunknown
Method::Generate::Constructorunknown
Method::Generate::DemolishAllunknown
Method::Inlinerunknown
MIME::Charset1.012.2
MIME::Charset::_Compat1.003.1
MIME::Charset::UTF1.010
MIME::Type2.17
MIME::Types2.17
Mixin::Linewise0.108
Mixin::Linewise::Readers0.108
Mixin::Linewise::Writers0.108
Mock::Config0.03
Modern::Perl1.20180701
Module::AutoInstall1.19
Module::Build0.4224
Module::Build::Base0.4224
Module::Build::Compat0.4224
Module::Build::Config0.4224
Module::Build::ConfigDataunknown
Module::Build::Cookbook0.4224
Module::Build::Dumper0.4224
Module::Build::Notes0.4224
Module::Build::Platform::aix0.4224
Module::Build::Platform::cygwin0.4224
Module::Build::Platform::darwin0.4224
Module::Build::Platform::Default0.4224
Module::Build::Platform::MacOS0.4224
Module::Build::Platform::os20.4224
Module::Build::Platform::Unix0.4224
Module::Build::Platform::VMS0.4224
Module::Build::Platform::VOS0.4224
Module::Build::Platform::Windows0.4224
Module::Build::PodParser0.4224
Module::Build::PPMMaker0.4224
Module::Build::Tiny0.039
Module::Build::XSUtil0.19
Module::Compile0.37
Module::Compile::Optunknown
Module::CPANfile1.1004
Module::CPANfile::Environmentunknown
Module::CPANfile::Prerequnknown
Module::CPANfile::Prereqsunknown
Module::CPANfile::Requirementunknown
Module::Faker0.020
Module::Faker::Appendix0.020
Module::Faker::Dist0.020
Module::Faker::File0.020
Module::Faker::Heavy0.020
Module::Faker::Module0.020
Module::Faker::Package0.020
Module::Find0.13
Module::Implementation0.09
Module::Install1.19
Module::Install::Admin1.19
Module::Install::Admin::Bundle1.19
Module::Install::Admin::Compiler1.19
Module::Install::Admin::Find1.19
Module::Install::Admin::Include1.19
Module::Install::Admin::Makefile1.19
Module::Install::Admin::Manifest1.19
Module::Install::Admin::Metadata1.19
Module::Install::Admin::ScanDeps1.19
Module::Install::Admin::WriteAll1.19
Module::Install::AutoInstall1.19
Module::Install::Base1.19
Module::Install::Bundle1.19
Module::Install::Can1.19
Module::Install::Catalystunknown
Module::Install::Compiler1.19
Module::Install::Deprecated1.19
Module::Install::DSL1.19
Module::Install::External1.19
Module::Install::Fetch1.19
Module::Install::Include1.19
Module::Install::Inline1.19
Module::Install::Makefile1.19
Module::Install::MakeMaker1.19
Module::Install::Metadata1.19
Module::Install::PAR1.19
Module::Install::Run1.19
Module::Install::Scripts1.19
Module::Install::Share1.19
Module::Install::Win321.19
Module::Install::With1.19
Module::Install::WriteAll1.19
Module::Optimizeunknown
Module::Path0.19
Module::Pluggable5.2
Module::Pluggable::Object5.2
Module::Runtime0.016
Module::Runtime::Conflicts0.003
Module::ScanDeps1.24
Module::ScanDeps::Cacheunknown
Module::Util1.09
Mojounknown
Mojo::Assetunknown
Mojo::Asset::Fileunknown
Mojo::Asset::Memoryunknown
Mojo::Baseunknown
Mojo::ByteStreamunknown
Mojo::Cacheunknown
Mojo::Collectionunknown
Mojo::Contentunknown
Mojo::Content::MultiPartunknown
Mojo::Content::Singleunknown
Mojo::Cookieunknown
Mojo::Cookie::Requestunknown
Mojo::Cookie::Responseunknown
Mojo::Dateunknown
Mojo::DOMunknown
Mojo::DOM::CSSunknown
Mojo::DOM::HTMLunknown
Mojo::EventEmitterunknown
Mojo::Exceptionunknown
Mojo::Fileunknown
Mojo::Headersunknown
Mojo::HelloWorldunknown
Mojo::Homeunknown
Mojo::IOLoopunknown
Mojo::IOLoop::Clientunknown
Mojo::IOLoop::Delayunknown
Mojo::IOLoop::Serverunknown
Mojo::IOLoop::Streamunknown
Mojo::IOLoop::Stream::HTTPClientunknown
Mojo::IOLoop::Stream::HTTPServerunknown
Mojo::IOLoop::Stream::WebSocketClientunknown
Mojo::IOLoop::Stream::WebSocketServerunknown
Mojo::IOLoop::Subprocessunknown
Mojo::IOLoop::TLSunknown
Mojo::JSONunknown
Mojo::JSON::Pointerunknown
Mojo::Loaderunknown
Mojo::Logunknown
Mojo::Messageunknown
Mojo::Message::Requestunknown
Mojo::Message::Responseunknown
Mojo::Parametersunknown
Mojo::Pathunknown
Mojo::Promiseunknown
Mojo::Reactorunknown
Mojo::Reactor::EVunknown
Mojo::Reactor::Pollunknown
Mojo::Serverunknown
Mojo::Server::CGIunknown
Mojo::Server::Daemonunknown
Mojo::Server::Hypnotoadunknown
Mojo::Server::Morbounknown
Mojo::Server::Morbo::Backendunknown
Mojo::Server::Morbo::Backend::Pollunknown
Mojo::Server::Preforkunknown
Mojo::Server::PSGIunknown
Mojo::Templateunknown
Mojo::Transactionunknown
Mojo::Transaction::HTTPunknown
Mojo::Transaction::WebSocketunknown
Mojo::Uploadunknown
Mojo::URLunknown
Mojo::UserAgentunknown
Mojo::UserAgent::CookieJarunknown
Mojo::UserAgent::Proxyunknown
Mojo::UserAgent::Serverunknown
Mojo::UserAgent::Transactorunknown
Mojo::Utilunknown
Mojo::WebSocketunknown
Mojolicious7.88
Mojolicious::Commandunknown
Mojolicious::Command::cgiunknown
Mojolicious::Command::cpanifyunknown
Mojolicious::Command::daemonunknown
Mojolicious::Command::evalunknown
Mojolicious::Command::generateunknown
Mojolicious::Command::generate::appunknown
Mojolicious::Command::generate::lite_appunknown
Mojolicious::Command::generate::makefileunknown
Mojolicious::Command::generate::plugin0.01
Mojolicious::Command::getunknown
Mojolicious::Command::inflateunknown
Mojolicious::Command::preforkunknown
Mojolicious::Command::psgiunknown
Mojolicious::Command::routesunknown
Mojolicious::Command::testunknown
Mojolicious::Command::versionunknown
Mojolicious::Commandsunknown
Mojolicious::Controllerunknown
Mojolicious::Liteunknown
Mojolicious::Pluginunknown
Mojolicious::Plugin::Configunknown
Mojolicious::Plugin::DefaultHelpersunknown
Mojolicious::Plugin::EPLRendererunknown
Mojolicious::Plugin::EPRendererunknown
Mojolicious::Plugin::HeaderConditionunknown
Mojolicious::Plugin::JSONConfigunknown
Mojolicious::Plugin::Mountunknown
Mojolicious::Plugin::PODRendererunknown
Mojolicious::Plugin::TagHelpersunknown
Mojolicious::Pluginsunknown
Mojolicious::Rendererunknown
Mojolicious::Routesunknown
Mojolicious::Routes::Matchunknown
Mojolicious::Routes::Patternunknown
Mojolicious::Routes::Routeunknown
Mojolicious::Sessionsunknown
Mojolicious::Staticunknown
Mojolicious::Typesunknown
Mojolicious::Validatorunknown
Mojolicious::Validator::Validationunknown
MojoX::Log::Report1.27
MojoX::MIME::Types2.17
Moo2.001001
Moo::_mrounknown
Moo::_stricturesunknown
Moo::_Utilsunknown
Moo::HandleMooseunknown
Moo::HandleMoose::_TypeMapunknown
Moo::HandleMoose::FakeMetaClassunknown
Moo::Objectunknown
Moo::Role2.001001
Moo::sificationunknown
Moose2.2011
Moose::Autobox0.16
Moose::Autobox::Array0.16
Moose::Autobox::Code0.16
Moose::Autobox::Defined0.16
Moose::Autobox::Hash0.16
Moose::Autobox::Indexed0.16
Moose::Autobox::Item0.16
Moose::Autobox::List0.16
Moose::Autobox::Number0.16
Moose::Autobox::Ref0.16
Moose::Autobox::Scalar0.16
Moose::Autobox::String0.16
Moose::Autobox::Undef0.16
Moose::Autobox::Value0.16
Moose::Deprecated2.2011
Moose::Exception2.2011
Moose::Exception::AccessorMustReadWrite2.2011
Moose::Exception::AddParameterizableTypeTakesParameterizableType2.2011
Moose::Exception::AddRoleTakesAMooseMetaRoleInstance2.2011
Moose::Exception::AddRoleToARoleTakesAMooseMetaRole2.2011
Moose::Exception::ApplyTakesABlessedInstance2.2011
Moose::Exception::AttachToClassNeedsAClassMOPClassInstanceOrASubclass2.2011
Moose::Exception::AttributeConflictInRoles2.2011
Moose::Exception::AttributeConflictInSummation2.2011
Moose::Exception::AttributeExtensionIsNotSupportedInRoles2.2011
Moose::Exception::AttributeIsRequired2.2011
Moose::Exception::AttributeMustBeAnClassMOPMixinAttributeCoreOrSubclass2.2011
Moose::Exception::AttributeNamesDoNotMatch2.2011
Moose::Exception::AttributeValueIsNotAnObject2.2011
Moose::Exception::AttributeValueIsNotDefined2.2011
Moose::Exception::AutoDeRefNeedsArrayRefOrHashRef2.2011
Moose::Exception::BadOptionFormat2.2011
Moose::Exception::BothBuilderAndDefaultAreNotAllowed2.2011
Moose::Exception::BuilderDoesNotExist2.2011
Moose::Exception::BuilderMethodNotSupportedForAttribute2.2011
Moose::Exception::BuilderMethodNotSupportedForInlineAttribute2.2011
Moose::Exception::BuilderMustBeAMethodName2.2011
Moose::Exception::CallingMethodOnAnImmutableInstance2.2011
Moose::Exception::CallingReadOnlyMethodOnAnImmutableInstance2.2011
Moose::Exception::CanExtendOnlyClasses2.2011
Moose::Exception::CannotAddAdditionalTypeCoercionsToUnion2.2011
Moose::Exception::CannotAddAsAnAttributeToARole2.2011
Moose::Exception::CannotApplyBaseClassRolesToRole2.2011
Moose::Exception::CannotAssignValueToReadOnlyAccessor2.2011
Moose::Exception::CannotAugmentIfLocalMethodPresent2.2011
Moose::Exception::CannotAugmentNoSuperMethod2.2011
Moose::Exception::CannotAutoDereferenceTypeConstraint2.2011
Moose::Exception::CannotAutoDerefWithoutIsa2.2011
Moose::Exception::CannotCalculateNativeType2.2011
Moose::Exception::CannotCallAnAbstractBaseMethod2.2011
Moose::Exception::CannotCallAnAbstractMethod2.2011
Moose::Exception::CannotCoerceAttributeWhichHasNoCoercion2.2011
Moose::Exception::CannotCoerceAWeakRef2.2011
Moose::Exception::CannotCreateHigherOrderTypeWithoutATypeParameter2.2011
Moose::Exception::CannotCreateMethodAliasLocalMethodIsPresent2.2011
Moose::Exception::CannotCreateMethodAliasLocalMethodIsPresentInClass2.2011
Moose::Exception::CannotDelegateLocalMethodIsPresent2.2011
Moose::Exception::CannotDelegateWithoutIsa2.2011
Moose::Exception::CannotFindDelegateMetaclass2.2011
Moose::Exception::CannotFindType2.2011
Moose::Exception::CannotFindTypeGivenToMatchOnType2.2011
Moose::Exception::CannotFixMetaclassCompatibility2.2011
Moose::Exception::CannotGenerateInlineConstraint2.2011
Moose::Exception::CannotInitializeMooseMetaRoleComposite2.2011
Moose::Exception::CannotInlineTypeConstraintCheck2.2011
Moose::Exception::CannotLocatePackageInINC2.2011
Moose::Exception::CannotMakeMetaclassCompatible2.2011
Moose::Exception::CannotOverrideALocalMethod2.2011
Moose::Exception::CannotOverrideBodyOfMetaMethods2.2011
Moose::Exception::CannotOverrideLocalMethodIsPresent2.2011
Moose::Exception::CannotOverrideNoSuperMethod2.2011
Moose::Exception::CannotRegisterUnnamedTypeConstraint2.2011
Moose::Exception::CannotUseLazyBuildAndDefaultSimultaneously2.2011
Moose::Exception::CanOnlyConsumeRole2.2011
Moose::Exception::CanOnlyWrapBlessedCode2.2011
Moose::Exception::CanReblessOnlyIntoASubclass2.2011
Moose::Exception::CanReblessOnlyIntoASuperclass2.2011
Moose::Exception::CircularReferenceInAlso2.2011
Moose::Exception::ClassDoesNotHaveInitMeta2.2011
Moose::Exception::ClassDoesTheExcludedRole2.2011
Moose::Exception::ClassNamesDoNotMatch2.2011
Moose::Exception::CloneObjectExpectsAnInstanceOfMetaclass2.2011
Moose::Exception::CodeBlockMustBeACodeRef2.2011
Moose::Exception::CoercingWithoutCoercions2.2011
Moose::Exception::CoercionAlreadyExists2.2011
Moose::Exception::CoercionNeedsTypeConstraint2.2011
Moose::Exception::ConflictDetectedInCheckRoleExclusions2.2011
Moose::Exception::ConflictDetectedInCheckRoleExclusionsInToClass2.2011
Moose::Exception::ConstructClassInstanceTakesPackageName2.2011
Moose::Exception::CouldNotCreateMethod2.2011
Moose::Exception::CouldNotCreateWriter2.2011
Moose::Exception::CouldNotEvalConstructor2.2011
Moose::Exception::CouldNotEvalDestructor2.2011
Moose::Exception::CouldNotFindTypeConstraintToCoerceFrom2.2011
Moose::Exception::CouldNotGenerateInlineAttributeMethod2.2011
Moose::Exception::CouldNotLocateTypeConstraintForUnion2.2011
Moose::Exception::CouldNotParseType2.2011
Moose::Exception::CreateMOPClassTakesArrayRefOfAttributes2.2011
Moose::Exception::CreateMOPClassTakesArrayRefOfSuperclasses2.2011
Moose::Exception::CreateMOPClassTakesHashRefOfMethods2.2011
Moose::Exception::CreateTakesArrayRefOfRoles2.2011
Moose::Exception::CreateTakesHashRefOfAttributes2.2011
Moose::Exception::CreateTakesHashRefOfMethods2.2011
Moose::Exception::DefaultToMatchOnTypeMustBeCodeRef2.2011
Moose::Exception::DelegationToAClassWhichIsNotLoaded2.2011
Moose::Exception::DelegationToARoleWhichIsNotLoaded2.2011
Moose::Exception::DelegationToATypeWhichIsNotAClass2.2011
Moose::Exception::DoesRequiresRoleName2.2011
Moose::Exception::EnumCalledWithAnArrayRefAndAdditionalArgs2.2011
Moose::Exception::EnumValuesMustBeString2.2011
Moose::Exception::ExtendsMissingArgs2.2011
Moose::Exception::HandlesMustBeAHashRef2.2011
Moose::Exception::IllegalInheritedOptions2.2011
Moose::Exception::IllegalMethodTypeToAddMethodModifier2.2011
Moose::Exception::IncompatibleMetaclassOfSuperclass2.2011
Moose::Exception::InitializeTakesUnBlessedPackageName2.2011
Moose::Exception::InitMetaRequiresClass2.2011
Moose::Exception::InstanceBlessedIntoWrongClass2.2011
Moose::Exception::InstanceMustBeABlessedReference2.2011
Moose::Exception::InvalidArgPassedToMooseUtilMetaRole2.2011
Moose::Exception::InvalidArgumentsToTraitAliases2.2011
Moose::Exception::InvalidArgumentToMethod2.2011
Moose::Exception::InvalidBaseTypeGivenToCreateParameterizedTypeConstraint2.2011
Moose::Exception::InvalidHandleValue2.2011
Moose::Exception::InvalidHasProvidedInARole2.2011
Moose::Exception::InvalidNameForType2.2011
Moose::Exception::InvalidOverloadOperator2.2011
Moose::Exception::InvalidRoleApplication2.2011
Moose::Exception::InvalidTypeConstraint2.2011
Moose::Exception::InvalidTypeGivenToCreateParameterizedTypeConstraint2.2011
Moose::Exception::InvalidValueForIs2.2011
Moose::Exception::IsaDoesNotDoTheRole2.2011
Moose::Exception::IsaLacksDoesMethod2.2011
Moose::Exception::LazyAttributeNeedsADefault2.2011
Moose::Exception::Legacy2.2011
Moose::Exception::MatchActionMustBeACodeRef2.2011
Moose::Exception::MessageParameterMustBeCodeRef2.2011
Moose::Exception::MetaclassIsAClassNotASubclassOfGivenMetaclass2.2011
Moose::Exception::MetaclassIsARoleNotASubclassOfGivenMetaclass2.2011
Moose::Exception::MetaclassIsNotASubclassOfGivenMetaclass2.2011
Moose::Exception::MetaclassMustBeASubclassOfMooseMetaClass2.2011
Moose::Exception::MetaclassMustBeASubclassOfMooseMetaRole2.2011
Moose::Exception::MetaclassMustBeDerivedFromClassMOPClass2.2011
Moose::Exception::MetaclassNotLoaded2.2011
Moose::Exception::MetaclassTypeIncompatible2.2011
Moose::Exception::MethodExpectedAMetaclassObject2.2011
Moose::Exception::MethodExpectsFewerArgs2.2011
Moose::Exception::MethodExpectsMoreArgs2.2011
Moose::Exception::MethodModifierNeedsMethodName2.2011
Moose::Exception::MethodNameConflictInRoles2.2011
Moose::Exception::MethodNameNotFoundInInheritanceHierarchy2.2011
Moose::Exception::MethodNameNotGiven2.2011
Moose::Exception::MOPAttributeNewNeedsAttributeName2.2011
Moose::Exception::MustDefineAMethodName2.2011
Moose::Exception::MustDefineAnAttributeName2.2011
Moose::Exception::MustDefineAnOverloadOperator2.2011
Moose::Exception::MustHaveAtLeastOneValueToEnumerate2.2011
Moose::Exception::MustPassAHashOfOptions2.2011
Moose::Exception::MustPassAMooseMetaRoleInstanceOrSubclass2.2011
Moose::Exception::MustPassAPackageNameOrAnExistingClassMOPPackageInstance2.2011
Moose::Exception::MustPassEvenNumberOfArguments2.2011
Moose::Exception::MustPassEvenNumberOfAttributeOptions2.2011
Moose::Exception::MustProvideANameForTheAttribute2.2011
Moose::Exception::MustSpecifyAtleastOneMethod2.2011
Moose::Exception::MustSpecifyAtleastOneRole2.2011
Moose::Exception::MustSpecifyAtleastOneRoleToApplicant2.2011
Moose::Exception::MustSupplyAClassMOPAttributeInstance2.2011
Moose::Exception::MustSupplyADelegateToMethod2.2011
Moose::Exception::MustSupplyAMetaclass2.2011
Moose::Exception::MustSupplyAMooseMetaAttributeInstance2.2011
Moose::Exception::MustSupplyAnAccessorTypeToConstructWith2.2011
Moose::Exception::MustSupplyAnAttributeToConstructWith2.2011
Moose::Exception::MustSupplyArrayRefAsCurriedArguments2.2011
Moose::Exception::MustSupplyPackageNameAndName2.2011
Moose::Exception::NeedsTypeConstraintUnionForTypeCoercionUnion2.2011
Moose::Exception::NeitherAttributeNorAttributeNameIsGiven2.2011
Moose::Exception::NeitherClassNorClassNameIsGiven2.2011
Moose::Exception::NeitherRoleNorRoleNameIsGiven2.2011
Moose::Exception::NeitherTypeNorTypeNameIsGiven2.2011
Moose::Exception::NoAttributeFoundInSuperClass2.2011
Moose::Exception::NoBodyToInitializeInAnAbstractBaseClass2.2011
Moose::Exception::NoCasesMatched2.2011
Moose::Exception::NoConstraintCheckForTypeConstraint2.2011
Moose::Exception::NoDestructorClassSpecified2.2011
Moose::Exception::NoImmutableTraitSpecifiedForClass2.2011
Moose::Exception::NoParentGivenToSubtype2.2011
Moose::Exception::OnlyInstancesCanBeCloned2.2011
Moose::Exception::OperatorIsRequired2.2011
Moose::Exception::OverloadConflictInSummation2.2011
Moose::Exception::OverloadRequiresAMetaClass2.2011
Moose::Exception::OverloadRequiresAMetaMethod2.2011
Moose::Exception::OverloadRequiresAMetaOverload2.2011
Moose::Exception::OverloadRequiresAMethodNameOrCoderef2.2011
Moose::Exception::OverloadRequiresAnOperator2.2011
Moose::Exception::OverloadRequiresNamesForCoderef2.2011
Moose::Exception::OverrideConflictInComposition2.2011
Moose::Exception::OverrideConflictInSummation2.2011
Moose::Exception::PackageDoesNotUseMooseExporter2.2011
Moose::Exception::PackageNameAndNameParamsNotGivenToWrap2.2011
Moose::Exception::PackagesAndModulesAreNotCachable2.2011
Moose::Exception::ParameterIsNotSubtypeOfParent2.2011
Moose::Exception::ReferencesAreNotAllowedAsDefault2.2011
Moose::Exception::RequiredAttributeLacksInitialization2.2011
Moose::Exception::RequiredAttributeNeedsADefault2.2011
Moose::Exception::RequiredMethodsImportedByClass2.2011
Moose::Exception::RequiredMethodsNotImplementedByClass2.2011
Moose::Exception::Role::Attribute2.2011
Moose::Exception::Role::AttributeName2.2011
Moose::Exception::Role::Class2.2011
Moose::Exception::Role::EitherAttributeOrAttributeName2.2011
Moose::Exception::Role::Instance2.2011
Moose::Exception::Role::InstanceClass2.2011
Moose::Exception::Role::InvalidAttributeOptions2.2011
Moose::Exception::Role::Method2.2011
Moose::Exception::Role::ParamsHash2.2011
Moose::Exception::Role::Role2.2011
Moose::Exception::Role::RoleForCreate2.2011
Moose::Exception::Role::RoleForCreateMOPClass2.2011
Moose::Exception::Role::TypeConstraint2.2011
Moose::Exception::RoleDoesTheExcludedRole2.2011
Moose::Exception::RoleExclusionConflict2.2011
Moose::Exception::RoleNameRequired2.2011
Moose::Exception::RoleNameRequiredForMooseMetaRole2.2011
Moose::Exception::RolesDoNotSupportAugment2.2011
Moose::Exception::RolesDoNotSupportExtends2.2011
Moose::Exception::RolesDoNotSupportInner2.2011
Moose::Exception::RolesDoNotSupportRegexReferencesForMethodModifiers2.2011
Moose::Exception::RolesInCreateTakesAnArrayRef2.2011
Moose::Exception::RolesListMustBeInstancesOfMooseMetaRole2.2011
Moose::Exception::SingleParamsToNewMustBeHashRef2.2011
Moose::Exception::TriggerMustBeACodeRef2.2011
Moose::Exception::TypeConstraintCannotBeUsedForAParameterizableType2.2011
Moose::Exception::TypeConstraintIsAlreadyCreated2.2011
Moose::Exception::TypeParameterMustBeMooseMetaType2.2011
Moose::Exception::UnableToCanonicalizeHandles2.2011
Moose::Exception::UnableToCanonicalizeNonRolePackage2.2011
Moose::Exception::UnableToRecognizeDelegateMetaclass2.2011
Moose::Exception::UndefinedHashKeysPassedToMethod2.2011
Moose::Exception::UnionCalledWithAnArrayRefAndAdditionalArgs2.2011
Moose::Exception::UnionTakesAtleastTwoTypeNames2.2011
Moose::Exception::ValidationFailedForInlineTypeConstraint2.2011
Moose::Exception::ValidationFailedForTypeConstraint2.2011
Moose::Exception::WrapTakesACodeRefToBless2.2011
Moose::Exception::WrongTypeConstraintGiven2.2011
Moose::Exporter2.2011
Moose::Meta::Attribute2.2011
Moose::Meta::Attribute::Native2.2011
Moose::Meta::Attribute::Native::Trait2.2011
Moose::Meta::Attribute::Native::Trait::Array2.2011
Moose::Meta::Attribute::Native::Trait::Bool2.2011
Moose::Meta::Attribute::Native::Trait::Code2.2011
Moose::Meta::Attribute::Native::Trait::Counter2.2011
Moose::Meta::Attribute::Native::Trait::Hash2.2011
Moose::Meta::Attribute::Native::Trait::Number2.2011
Moose::Meta::Attribute::Native::Trait::String2.2011
Moose::Meta::Class2.2011
Moose::Meta::Class::Immutable::Trait2.2011
Moose::Meta::Instance2.2011
Moose::Meta::Method2.2011
Moose::Meta::Method::Accessor2.2011
Moose::Meta::Method::Accessor::Native2.2011
Moose::Meta::Method::Accessor::Native::Array2.2011
Moose::Meta::Method::Accessor::Native::Array::accessor2.2011
Moose::Meta::Method::Accessor::Native::Array::clear2.2011
Moose::Meta::Method::Accessor::Native::Array::count2.2011
Moose::Meta::Method::Accessor::Native::Array::delete2.2011
Moose::Meta::Method::Accessor::Native::Array::elements2.2011
Moose::Meta::Method::Accessor::Native::Array::first2.2011
Moose::Meta::Method::Accessor::Native::Array::first_index2.2011
Moose::Meta::Method::Accessor::Native::Array::get2.2011
Moose::Meta::Method::Accessor::Native::Array::grep2.2011
Moose::Meta::Method::Accessor::Native::Array::insert2.2011
Moose::Meta::Method::Accessor::Native::Array::is_empty2.2011
Moose::Meta::Method::Accessor::Native::Array::join2.2011
Moose::Meta::Method::Accessor::Native::Array::map2.2011
Moose::Meta::Method::Accessor::Native::Array::natatime2.2011
Moose::Meta::Method::Accessor::Native::Array::pop2.2011
Moose::Meta::Method::Accessor::Native::Array::push2.2011
Moose::Meta::Method::Accessor::Native::Array::reduce2.2011
Moose::Meta::Method::Accessor::Native::Array::set2.2011
Moose::Meta::Method::Accessor::Native::Array::shallow_clone2.2011
Moose::Meta::Method::Accessor::Native::Array::shift2.2011
Moose::Meta::Method::Accessor::Native::Array::shuffle2.2011
Moose::Meta::Method::Accessor::Native::Array::sort2.2011
Moose::Meta::Method::Accessor::Native::Array::sort_in_place2.2011
Moose::Meta::Method::Accessor::Native::Array::splice2.2011
Moose::Meta::Method::Accessor::Native::Array::uniq2.2011
Moose::Meta::Method::Accessor::Native::Array::unshift2.2011
Moose::Meta::Method::Accessor::Native::Array::Writer2.2011
Moose::Meta::Method::Accessor::Native::Bool::not2.2011
Moose::Meta::Method::Accessor::Native::Bool::set2.2011
Moose::Meta::Method::Accessor::Native::Bool::toggle2.2011
Moose::Meta::Method::Accessor::Native::Bool::unset2.2011
Moose::Meta::Method::Accessor::Native::Code::execute2.2011
Moose::Meta::Method::Accessor::Native::Code::execute_method2.2011
Moose::Meta::Method::Accessor::Native::Collection2.2011
Moose::Meta::Method::Accessor::Native::Counter::dec2.2011
Moose::Meta::Method::Accessor::Native::Counter::inc2.2011
Moose::Meta::Method::Accessor::Native::Counter::reset2.2011
Moose::Meta::Method::Accessor::Native::Counter::set2.2011
Moose::Meta::Method::Accessor::Native::Counter::Writer2.2011
Moose::Meta::Method::Accessor::Native::Hash2.2011
Moose::Meta::Method::Accessor::Native::Hash::accessor2.2011
Moose::Meta::Method::Accessor::Native::Hash::clear2.2011
Moose::Meta::Method::Accessor::Native::Hash::count2.2011
Moose::Meta::Method::Accessor::Native::Hash::defined2.2011
Moose::Meta::Method::Accessor::Native::Hash::delete2.2011
Moose::Meta::Method::Accessor::Native::Hash::elements2.2011
Moose::Meta::Method::Accessor::Native::Hash::exists2.2011
Moose::Meta::Method::Accessor::Native::Hash::get2.2011
Moose::Meta::Method::Accessor::Native::Hash::is_empty2.2011
Moose::Meta::Method::Accessor::Native::Hash::keys2.2011
Moose::Meta::Method::Accessor::Native::Hash::kv2.2011
Moose::Meta::Method::Accessor::Native::Hash::set2.2011
Moose::Meta::Method::Accessor::Native::Hash::shallow_clone2.2011
Moose::Meta::Method::Accessor::Native::Hash::values2.2011
Moose::Meta::Method::Accessor::Native::Hash::Writer2.2011
Moose::Meta::Method::Accessor::Native::Number::abs2.2011
Moose::Meta::Method::Accessor::Native::Number::add2.2011
Moose::Meta::Method::Accessor::Native::Number::div2.2011
Moose::Meta::Method::Accessor::Native::Number::mod2.2011
Moose::Meta::Method::Accessor::Native::Number::mul2.2011
Moose::Meta::Method::Accessor::Native::Number::set2.2011
Moose::Meta::Method::Accessor::Native::Number::sub2.2011
Moose::Meta::Method::Accessor::Native::Reader2.2011
Moose::Meta::Method::Accessor::Native::String::append2.2011
Moose::Meta::Method::Accessor::Native::String::chomp2.2011
Moose::Meta::Method::Accessor::Native::String::chop2.2011
Moose::Meta::Method::Accessor::Native::String::clear2.2011
Moose::Meta::Method::Accessor::Native::String::inc2.2011
Moose::Meta::Method::Accessor::Native::String::length2.2011
Moose::Meta::Method::Accessor::Native::String::match2.2011
Moose::Meta::Method::Accessor::Native::String::prepend2.2011
Moose::Meta::Method::Accessor::Native::String::replace2.2011
Moose::Meta::Method::Accessor::Native::String::substr2.2011
Moose::Meta::Method::Accessor::Native::Writer2.2011
Moose::Meta::Method::Augmented2.2011
Moose::Meta::Method::Constructor2.2011
Moose::Meta::Method::Delegation2.2011
Moose::Meta::Method::Destructor2.2011
Moose::Meta::Method::Meta2.2011
Moose::Meta::Method::Overridden2.2011
Moose::Meta::Mixin::AttributeCore2.2011
Moose::Meta::Object::Trait2.2011
Moose::Meta::Role2.2011
Moose::Meta::Role::Application2.2011
Moose::Meta::Role::Application::RoleSummation2.2011
Moose::Meta::Role::Application::ToClass2.2011
Moose::Meta::Role::Application::ToInstance2.2011
Moose::Meta::Role::Application::ToRole2.2011
Moose::Meta::Role::Attribute2.2011
Moose::Meta::Role::Composite2.2011
Moose::Meta::Role::Method2.2011
Moose::Meta::Role::Method::Conflicting2.2011
Moose::Meta::Role::Method::Required2.2011
Moose::Meta::TypeCoercion2.2011
Moose::Meta::TypeCoercion::Union2.2011
Moose::Meta::TypeConstraint2.2011
Moose::Meta::TypeConstraint::Class2.2011
Moose::Meta::TypeConstraint::DuckType2.2011
Moose::Meta::TypeConstraint::Enum2.2011
Moose::Meta::TypeConstraint::Parameterizable2.2011
Moose::Meta::TypeConstraint::Parameterized2.2011
Moose::Meta::TypeConstraint::Registry2.2011
Moose::Meta::TypeConstraint::Role2.2011
Moose::Meta::TypeConstraint::Union2.2011
Moose::Object2.2011
Moose::Role2.2011
Moose::Util2.2011
Moose::Util::MetaRole2.2011
Moose::Util::TypeConstraints2.2011
Moose::Util::TypeConstraints::Builtins2.2011
MooseX::Adopt::Class::Accessor::Fast0.009032
MooseX::Aliases0.11
MooseX::Aliases::Meta::Trait::Attribute0.11
MooseX::Aliases::Meta::Trait::Class0.11
MooseX::Aliases::Meta::Trait::Method0.11
MooseX::Aliases::Meta::Trait::Role0.11
MooseX::Aliases::Meta::Trait::Role::ApplicationToClass0.11
MooseX::Aliases::Meta::Trait::Role::ApplicationToRole0.11
MooseX::Aliases::Meta::Trait::Role::Composite0.11
MooseX::App::Cmd0.32
MooseX::App::Cmd::Command0.32
MooseX::ArrayRef0.005
MooseX::ArrayRef::Meta::Class0.005
MooseX::ArrayRef::Meta::Instance 0.005
MooseX::ClassAttribute 0.29
MooseX::ClassAttribute::Meta::Role::Attribute 0.29
MooseX::ClassAttribute::Trait::Application 0.29
MooseX::ClassAttribute::Trait::Application::ToClass 0.29
MooseX::ClassAttribute::Trait::Application::ToRole 0.29
MooseX::ClassAttribute::Trait::Attribute 0.29
MooseX::ClassAttribute::Trait::Class 0.29
MooseX::ClassAttribute::Trait::Mixin::HasClassAttributes 0.29
MooseX::ClassAttribute::Trait::Role 0.29
MooseX::ClassAttribute::Trait::Role::Composite 0.29
MooseX::Clone 0.06
MooseX::Clone::Meta::Attribute::Trait::Clone 0.06
MooseX::Clone::Meta::Attribute::Trait::Clone::Base 0.06
MooseX::Clone::Meta::Attribute::Trait::Clone::Std 0.06
MooseX::Clone::Meta::Attribute::Trait::Copy 0.06
MooseX::Clone::Meta::Attribute::Trait::NoClone 0.06
MooseX::Clone::Meta::Attribute::Trait::StorableClone 0.06
MooseX::ConfigFromFile 0.14
MooseX::Configuration 0.02
MooseX::Configuration::Trait::Attribute 0.02
MooseX::Configuration::Trait::Attribute::ConfigKey 0.02
MooseX::Configuration::Trait::Object 0.02
MooseX::Daemonize 0.21
MooseX::Daemonize::Core 0.21
MooseX::Daemonize::Pid 0.21
MooseX::Daemonize::Pid::File 0.21
MooseX::Daemonize::WithPidFile 0.21
MooseX::Declare 0.43
MooseX::Declare::Context 0.43
MooseX::Declare::Context::Namespaced 0.43
MooseX::Declare::Context::Parameterized 0.43
MooseX::Declare::Syntax::EmptyBlockIfMissing 0.43
MooseX::Declare::Syntax::Extending 0.43
MooseX::Declare::Syntax::InnerSyntaxHandling 0.43
MooseX::Declare::Syntax::Keyword::Class 0.43
MooseX::Declare::Syntax::Keyword::Clean 0.43
MooseX::Declare::Syntax::Keyword::Method 0.43
MooseX::Declare::Syntax::Keyword::MethodModifier 0.43
MooseX::Declare::Syntax::Keyword::Namespace 0.43
MooseX::Declare::Syntax::Keyword::Role 0.43
MooseX::Declare::Syntax::Keyword::With 0.43
MooseX::Declare::Syntax::KeywordHandling 0.43
MooseX::Declare::Syntax::MethodDeclaration 0.43
MooseX::Declare::Syntax::MooseSetup 0.43
MooseX::Declare::Syntax::NamespaceHandling 0.43
MooseX::Declare::Syntax::OptionHandling 0.43
MooseX::Declare::Syntax::RoleApplication 0.43
MooseX::Declare::Util 0.43
MooseX::Emulate::Class::Accessor::Fast 0.009032
MooseX::Emulate::Class::Accessor::Fast::Meta::Accessor unknown
MooseX::Emulate::Class::Accessor::Fast::Meta::Role::Attribute unknown
MooseX::Getopt 0.71
MooseX::Getopt::Basic 0.71
MooseX::Getopt::Dashes 0.71
MooseX::Getopt::GLD 0.71
MooseX::Getopt::Meta::Attribute 0.71
MooseX::Getopt::Meta::Attribute::NoGetopt 0.71
MooseX::Getopt::Meta::Attribute::Trait 0.71
MooseX::Getopt::Meta::Attribute::Trait::NoGetopt 0.71
MooseX::Getopt::OptionTypeMap 0.71
MooseX::Getopt::ProcessedArgv 0.71
MooseX::Getopt::Strict 0.71
MooseX::Getopt::Usage 0.24
MooseX::Getopt::Usage::Formatter 0.24
MooseX::Getopt::Usage::Pod::Text 0.24
MooseX::Getopt::Usage::Role::Man 0.24
MooseX::GlobRef 0.0701
MooseX::GlobRef::Object 0.0701
MooseX::GlobRef::Role::Meta::Instance 0.0701
MooseX::GlobRef::Role::Object 0.0701
MooseX::InsideOut 0.106
MooseX::InsideOut::Role::Meta::Instance 0.106
MooseX::Iterator 0.11
MooseX::Iterator::Array 0.11
MooseX::Iterator::Hash 0.11
MooseX::Iterator::Meta::Iterable 0.11
MooseX::Iterator::Role 0.11
MooseX::LazyLogDispatch 0.02
MooseX::LazyLogDispatch::Levels 0.02
MooseX::LazyRequire 0.11
MooseX::LazyRequire::Meta::Attribute::Trait::LazyRequire 0.11
MooseX::Log::Log4perl 0.47
MooseX::Log::Log4perl::Easy 0.47
MooseX::LogDispatch 1.2002
MooseX::LogDispatch::Levels unknown
MooseX::MarkAsMethods 0.15
MooseX::Meta::TypeConstraint::ForceCoercion 0.01
MooseX::Method::Signatures 0.49
MooseX::Method::Signatures::Meta::Method 0.49
MooseX::Method::Signatures::Types 0.49
MooseX::MethodAttributes 0.31
MooseX::MethodAttributes::Inheritable 0.31
MooseX::MethodAttributes::Role 0.31
MooseX::MethodAttributes::Role::AttrContainer 0.31
MooseX::MethodAttributes::Role::AttrContainer::Inheritable 0.31
MooseX::MethodAttributes::Role::Meta::Class 0.31
MooseX::MethodAttributes::Role::Meta::Map 0.31
MooseX::MethodAttributes::Role::Meta::Method 0.31
MooseX::MethodAttributes::Role::Meta::Method::MaybeWrapped 0.31
MooseX::MethodAttributes::Role::Meta::Method::Wrapped 0.31
MooseX::MethodAttributes::Role::Meta::Role 0.31
MooseX::MethodAttributes::Role::Meta::Role::Application 0.31
MooseX::MethodAttributes::Role::Meta::Role::Application::Summation 0.31
MooseX::NonMoose 0.26
MooseX::NonMoose::InsideOut 0.26
MooseX::NonMoose::Meta::Role::Class 0.26
MooseX::NonMoose::Meta::Role::Constructor 0.26
MooseX::Object::Pluggable 0.0014
MooseX::OneArgNew 0.005
MooseX::Param 0.02
MooseX::Params::Validate 0.21
MooseX::Params::Validate::Exception::ValidationFailedForTypeConstraint 0.21
MooseX::POE 0.215
MooseX::POE::Aliased 0.215
MooseX::POE::Meta::Method::State 0.215
MooseX::POE::Meta::Role 0.215
MooseX::POE::Meta::Trait 0.215
MooseX::POE::Meta::Trait::Class 0.215
MooseX::POE::Meta::Trait::Instance 0.215
MooseX::POE::Meta::Trait::Object 0.215
MooseX::POE::Meta::Trait::SweetArgs 0.215
MooseX::POE::Role 0.215
MooseX::POE::SweetArgs 0.215
MooseX::RelatedClassRoles 0.004
MooseX::Role::Cmd 0.10
MooseX::Role::Cmd::Meta::Attribute::Trait unknown
MooseX::Role::Parameterised 1.10
MooseX::Role::Parameterized 1.10
MooseX::Role::Parameterized::Meta::Role::Parameterized 1.10
MooseX::Role::Parameterized::Meta::Trait::Parameterizable 1.10
MooseX::Role::Parameterized::Meta::Trait::Parameterized 1.10
MooseX::Role::Parameterized::Parameters 1.10
MooseX::Role::TraitConstructor 0.01
MooseX::Role::WithOverloading 0.17
MooseX::Role::WithOverloading::Meta::Role 0.17
MooseX::Role::WithOverloading::Meta::Role::Application 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::Composite 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToClass 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToInstance 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::Composite::ToRole 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::FixOverloadedRefs 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::ToClass 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::ToInstance 0.17
MooseX::Role::WithOverloading::Meta::Role::Application::ToRole 0.17
MooseX::Role::WithOverloading::Meta::Role::Composite 0.17
MooseX::SemiAffordanceAccessor 0.10
MooseX::SemiAffordanceAccessor::Role::Attribute 0.10
MooseX::SetOnce 0.200002
MooseX::SimpleConfig 0.11
MooseX::Singleton 0.30
MooseX::Singleton::Role::Meta::Class 0.30
MooseX::Singleton::Role::Meta::Instance 0.30
MooseX::Singleton::Role::Meta::Method::Constructor 0.30
MooseX::Singleton::Role::Object 0.30
MooseX::Storage 0.52
MooseX::Storage::Base::WithChecksum 0.52
MooseX::Storage::Basic 0.52
MooseX::Storage::Deferred 0.52
MooseX::Storage::Engine 0.52
MooseX::Storage::Engine::IO::AtomicFile 0.52
MooseX::Storage::Engine::IO::File 0.52
MooseX::Storage::Engine::Trait::DisableCycleDetection 0.52
MooseX::Storage::Engine::Trait::OnlyWhenBuilt 0.52
MooseX::Storage::Format::JSON 0.52
MooseX::Storage::Format::Storable 0.52
MooseX::Storage::Format::YAML 0.52
MooseX::Storage::IO::AtomicFile 0.52
MooseX::Storage::IO::File 0.52
MooseX::Storage::IO::StorableFile 0.52
MooseX::Storage::Meta::Attribute::DoNotSerialize 0.52
MooseX::Storage::Meta::Attribute::Trait::DoNotSerialize 0.52
MooseX::Storage::Traits::DisableCycleDetection 0.52
MooseX::Storage::Traits::OnlyWhenBuilt 0.52
MooseX::Storage::Util 0.52
MooseX::StrictConstructor 0.21
MooseX::StrictConstructor::Trait::Class 0.21
MooseX::StrictConstructor::Trait::Method::Constructor 0.21
MooseX::Traits 0.13
MooseX::Traits::Pluggable 0.12
MooseX::Traits::Util 0.13
MooseX::Types 0.50
MooseX::Types::Base 0.50
MooseX::Types::CheckedUtilExports 0.50
MooseX::Types::Combine 0.50
MooseX::Types::Common 0.001014
MooseX::Types::Common::Numeric 0.001014
MooseX::Types::Common::String 0.001014
MooseX::Types::DateTime 0.13
MooseX::Types::LoadableClass 0.015
MooseX::Types::Moose 0.50
MooseX::Types::Path::Class 0.09
MooseX::Types::Path::Tiny 0.012
MooseX::Types::Perl 0.101343
MooseX::Types::Set::Object 0.05
MooseX::Types::Stringlike 0.003
MooseX::Types::Structured 0.36
MooseX::Types::TypeDecorator 0.50
MooseX::Types::UndefinedType 0.50
MooseX::Types::Util 0.50
MooseX::Types::Wrapper 0.50
MooseX::Workers 0.24
MooseX::Workers::Engine 0.24
MooseX::Workers::Job 0.24
MooX::HandlesVia 0.001008
MooX::Types::MooseLike 0.29
MooX::Types::MooseLike::Base 0.29
MooX::Types::MooseLike::Numeric 1.03
Mozilla::CA 20180117
MRO::Compat 0.13
multidimensional 0.014
MyApp::Schema 0.001
MyApplication::Form::User unknown
MyPersonHandler 0.14
namespace::autoclean 0.28
namespace::clean 0.27
Net::Domain::TLD 1.75
Net::EmptyPort unknown
Net::HTTP 6.18
Net::HTTP::Methods 6.18
Net::HTTP::NB 6.18
Net::HTTPS 6.18
Net::Server 2.009
Net::Server::Daemonize 0.06
Net::Server::Fork unknown
Net::Server::HTTP unknown
Net::Server::INET unknown
Net::Server::Log::Log::Log4perl unknown
Net::Server::Log::Sys::Syslog unknown
Net::Server::Multiplex unknown
Net::Server::MultiType unknown
Net::Server::PreFork unknown
Net::Server::PreForkSimple unknown
Net::Server::Proto unknown
Net::Server::Proto::SSL unknown
Net::Server::Proto::SSLEAY unknown
Net::Server::Proto::TCP unknown
Net::Server::Proto::UDP unknown
Net::Server::Proto::UNIX unknown
Net::Server::Proto::UNIXDGRAM unknown
Net::Server::PSGI unknown
Net::Server::SIG 0.03
Net::Server::Single unknown
Net::SSLeay 1.85
Net::SSLeay::Handle 0.61
NetAddr::IP 4.079
NetAddr::IP::InetBase 0.08
NetAddr::IP::Lite 1.57
NetAddr::IP::Util 1.53
NetAddr::IP::Util_IS 1
NetAddr::IP::UtilPP 1.09
ntheory 0.70
Number::Compare 0.03
Number::Format 1.75
Number::Misc 1.2
Obj 1.39
Object::InsideOut 4.04
Object::InsideOut::Exception 4.04
Object::InsideOut::Metadata 4.04
Object::InsideOut::Secure 4.04
Object::InsideOut::Util 4.04
Object::Signature 1.07
Object::Signature::File 1.07
ojo unknown
ok 1.302138
OLE::Storage_Lite::PPS 0.19
oo unknown
oose 2.2011
Package::DeprecationManager 0.17
Package::Stash 0.37
Package::Stash::PP 0.37
Package::Stash::XS 0.28
Package::Variant 1.003002
PadWalker 2.3
Parallel::ForkManager 1.20
Params::Util 1.07
Params::Validate 1.29
Params::Validate::Constants 1.29
Params::Validate::PP 1.29
Params::Validate::XS 1.29
Params::ValidationCompiler 0.27
Params::ValidationCompiler::Compiler 0.27
Params::ValidationCompiler::Exceptions 0.27
Parse::Method::Signatures 1.003019
Parse::Method::Signatures::Param unknown
Parse::Method::Signatures::Param::Bindable unknown
Parse::Method::Signatures::Param::Named unknown
Parse::Method::Signatures::Param::Placeholder unknown
Parse::Method::Signatures::Param::Positional unknown
Parse::Method::Signatures::Param::Unpacked unknown
Parse::Method::Signatures::Param::Unpacked::Array unknown
Parse::Method::Signatures::Param::Unpacked::Hash unknown
Parse::Method::Signatures::ParamCollection unknown
Parse::Method::Signatures::Sig unknown
Parse::Method::Signatures::TypeConstraint unknown
Parse::Method::Signatures::Types unknown
Parse::PMFile 0.41
Parse::RecDescent 1.967015
Parse::Yapp 1.21
Parse::Yapp::Driver 1.21
Parse::Yapp::Grammar unknown
Parse::Yapp::Lalr unknown
Parse::Yapp::Options unknown
Parse::Yapp::Output unknown
Parse::Yapp::Parse unknown
Path::Class 0.37
Path::Class::Dir 0.37
Path::Class::Entity 0.37
Path::Class::File 0.37
Path::FindDev unknown
Path::FindDev::Object unknown
Path::IsDev 1.001003
Path::IsDev::Heuristic::Changelog 1.001003
Path::IsDev::Heuristic::DevDirMarker 1.001003
Path::IsDev::Heuristic::Makefile 1.001003
Path::IsDev::Heuristic::META 1.001003
Path::IsDev::Heuristic::MYMETA 1.001003
Path::IsDev::Heuristic::TestDir 1.001003
Path::IsDev::Heuristic::Tool::Dzil 1.001003
Path::IsDev::Heuristic::Tool::MakeMaker 1.001003
Path::IsDev::Heuristic::Tool::ModuleBuild 1.001003
Path::IsDev::Heuristic::VCS::Git 1.001003
Path::IsDev::HeuristicSet::Basic 1.001003
Path::IsDev::NegativeHeuristic::HomeDir 1.001003
Path::IsDev::NegativeHeuristic::IsDev::IgnoreFile 1.001003
Path::IsDev::NegativeHeuristic::PerlINC 1.001003
Path::IsDev::Object 1.001003
Path::IsDev::Result 1.001003
Path::IsDev::Role::Heuristic 1.001003
Path::IsDev::Role::HeuristicSet 1.001003
Path::IsDev::Role::HeuristicSet::Simple 1.001003
Path::IsDev::Role::Matcher::Child::BaseName::MatchRegexp 1.001003
Path::IsDev::Role::Matcher::Child::BaseName::MatchRegexp::File 1.001003
Path::IsDev::Role::Matcher::Child::Exists::Any 1.001003
Path::IsDev::Role::Matcher::Child::Exists::Any::Dir 1.001003
Path::IsDev::Role::Matcher::Child::Exists::Any::File 1.001003
Path::IsDev::Role::Matcher::FullPath::Is::Any 1.001003
Path::IsDev::Role::NegativeHeuristic 1.001003
Path::Tiny 0.106
PDL unknown
PDL::Bad unknown
PDL::Basic unknown
PDL::CallExt unknown
PDL::Char unknown
PDL::Complex 2.009
PDL::Compression unknown
PDL::Constants 0.02
PDL::Core 2.019
PDL::Core::Dev unknown
PDL::Dbg unknown
PDL::Demos::BAD2_demo unknown
PDL::Demos::BAD_demo unknown
PDL::Demos::Cartography_demo unknown
PDL::Demos::General unknown
PDL::Demos::Gnuplot_demo unknown
PDL::Demos::PGPLOT_demo unknown
PDL::Demos::PGPLOT_OO_demo unknown
PDL::Demos::Prima unknown
PDL::Demos::Routines unknown
PDL::Demos::Transform_demo unknown
PDL::Demos::TriD1 unknown
PDL::Demos::TriD2 unknown
PDL::Demos::TriDGallery unknown
PDL::Doc::Config unknown
PDL::Doc::Perldl unknown
PDL::FFT unknown
PDL::Filter::Linear unknown
PDL::Filter::LinSmooth unknown
PDL::Fit::Gaussian unknown
PDL::Fit::Linfit unknown
PDL::Fit::LM unknown
PDL::Fit::Polynomial unknown
PDL::Func unknown
PDL::Graphics2D unknown
PDL::Graphics::IIS unknown
PDL::Graphics::Limits 0.01
PDL::Graphics::LUT unknown
PDL::Graphics::PGPLOT unknown
PDL::Graphics::PGPLOT::Window unknown
PDL::Graphics::PGPLOTOptions unknown
PDL::Graphics::State unknown
PDL::GSL::DIFF unknown
PDL::GSL::INTEG unknown
PDL::GSL::INTERP unknown
PDL::GSL::MROOT unknown
PDL::GSL::RNG unknown
PDL::GSLSF::AIRY unknown
PDL::GSLSF::BESSEL unknown
PDL::GSLSF::CLAUSEN unknown
PDL::GSLSF::COULOMB unknown
PDL::GSLSF::COUPLING unknown
PDL::GSLSF::DAWSON unknown
PDL::GSLSF::DEBYE unknown
PDL::GSLSF::DILOG unknown
PDL::GSLSF::ELEMENTARY unknown
PDL::GSLSF::ELLINT unknown
PDL::GSLSF::ELLJAC unknown
PDL::GSLSF::ERF unknown
PDL::GSLSF::EXP unknown
PDL::GSLSF::EXPINT unknown
PDL::GSLSF::FERMI_DIRAC unknown
PDL::GSLSF::GAMMA unknown
PDL::GSLSF::GEGENBAUER unknown
PDL::GSLSF::HYPERG unknown
PDL::GSLSF::LAGUERRE unknown
PDL::GSLSF::LEGENDRE unknown
PDL::GSLSF::LOG unknown
PDL::GSLSF::POLY unknown
PDL::GSLSF::POW_INT unknown
PDL::GSLSF::PSI unknown
PDL::GSLSF::SYNCHROTRON unknown
PDL::GSLSF::TRANSPORT unknown
PDL::GSLSF::TRIG unknown
PDL::GSLSF::ZETA unknown
PDL::Image2D unknown
PDL::ImageND unknown
PDL::ImageRGB unknown
PDL::Install::Files 2.009
PDL::IO::Dicom unknown
PDL::IO::Dumper 1.3.2
PDL::IO::FastRaw unknown
PDL::IO::FITS 0.92
PDL::IO::FlexRaw unknown
PDL::IO::GD unknown
PDL::IO::Misc unknown
PDL::IO::Pic unknown
PDL::IO::Pnm unknown
PDL::IO::Storable unknown
PDL::Lite unknown
PDL::LiteF unknown
PDL::Lvalue unknown
PDL::Math unknown
PDL::Matrix 0.5
PDL::MatrixOps unknown
PDL::MyMod unknown
PDL::NiceSlice 1.001
PDL::Ops unknown
PDL::Opt::Simplex unknown
PDL::Options 0.92
PDL::Perldl2::Plugin::CleanErrors unknown
PDL::Perldl2::Plugin::NiceSlice unknown
PDL::Perldl2::Plugin::PDLCommands unknown
PDL::Perldl2::Plugin::PrintControl unknown
PDL::Perldl2::Profile::Perldl2 0.008
PDL::Perldl2::Script unknown
PDL::PodParser unknown
PDL::PP::Code unknown
PDL::PP::Dump unknown
PDL::PP::PdlDimsObj unknown
PDL::PP::PdlParObj unknown
PDL::PP::Rule 2.3
PDL::PP::Signature unknown
PDL::Primitive unknown
PDL::Reduce unknown
PDL::Slices unknown
PDL::Transform unknown
PDL::Transform::Cartography 0.6
PDL::Types unknown
PDL::Ufunc unknown
PDL::Version 2.019
Pegex 0.64
Pegex::Base unknown
Pegex::Bootstrap unknown
Pegex::Compiler unknown
Pegex::Grammar unknown
Pegex::Grammar::Atoms unknown
Pegex::Input unknown
Pegex::Module unknown
Pegex::Optimizer unknown
Pegex::Parser unknown
Pegex::Parser::Indent unknown
Pegex::Pegex::AST unknown
Pegex::Pegex::Grammar unknown
Pegex::Receiver unknown
Pegex::Regex unknown
Pegex::Tree unknown
Pegex::Tree::Wrap unknown
Perl6::Export 0.07
Perl6::Form 0.06
Perl::Critic 1.132
Perl::Critic::Annotation 1.132
Perl::Critic::Command 1.132
Perl::Critic::Config 1.132
Perl::Critic::Document 1.132
Perl::Critic::Exception 1.132
Perl::Critic::Exception::AggregateConfiguration 1.132
Perl::Critic::Exception::Configuration 1.132
Perl::Critic::Exception::Configuration::Generic 1.132
Perl::Critic::Exception::Configuration::NonExistentPolicy 1.132
Perl::Critic::Exception::Configuration::Option 1.132
Perl::Critic::Exception::Configuration::Option::Global 1.132
Perl::Critic::Exception::Configuration::Option::Global::ExtraParameter 1.132
Perl::Critic::Exception::Configuration::Option::Global::ParameterValue 1.132
Perl::Critic::Exception::Configuration::Option::Policy 1.132
Perl::Critic::Exception::Configuration::Option::Policy::ExtraParameter 1.132
Perl::Critic::Exception::Configuration::Option::Policy::ParameterValue 1.132
Perl::Critic::Exception::Fatal 1.132
Perl::Critic::Exception::Fatal::Generic 1.132
Perl::Critic::Exception::Fatal::Internal 1.132
Perl::Critic::Exception::Fatal::PolicyDefinition 1.132
Perl::Critic::Exception::IO 1.132
Perl::Critic::Exception::Parse 1.132
Perl::Critic::OptionsProcessor 1.132
Perl::Critic::Policy 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitBooleanGrep 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitComplexMappings 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitLvalueSubstr 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitReverseSortBlock 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitSleepViaSelect 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitStringyEval 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitStringySplit 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalCan 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitUniversalIsa 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitUselessTopic 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidGrep 1.132
Perl::Critic::Policy::BuiltinFunctions::ProhibitVoidMap 1.132
Perl::Critic::Policy::BuiltinFunctions::RequireBlockGrep 1.132
Perl::Critic::Policy::BuiltinFunctions::RequireBlockMap 1.132
Perl::Critic::Policy::BuiltinFunctions::RequireGlobFunction 1.132
Perl::Critic::Policy::BuiltinFunctions::RequireSimpleSortBlock 1.132
Perl::Critic::Policy::ClassHierarchies::ProhibitAutoloading 1.132
Perl::Critic::Policy::ClassHierarchies::ProhibitExplicitISA 1.132
Perl::Critic::Policy::ClassHierarchies::ProhibitOneArgBless 1.132
Perl::Critic::Policy::CodeLayout::ProhibitHardTabs 1.132
Perl::Critic::Policy::CodeLayout::ProhibitParensWithBuiltins 1.132
Perl::Critic::Policy::CodeLayout::ProhibitQuotedWordLists 1.132
Perl::Critic::Policy::CodeLayout::ProhibitTrailingWhitespace 1.132
Perl::Critic::Policy::CodeLayout::RequireConsistentNewlines 1.132
Perl::Critic::Policy::CodeLayout::RequireTidyCode 1.132
Perl::Critic::Policy::CodeLayout::RequireTrailingCommas 1.132
Perl::Critic::Policy::ControlStructures::ProhibitCascadingIfElse 1.132
Perl::Critic::Policy::ControlStructures::ProhibitCStyleForLoops 1.132
Perl::Critic::Policy::ControlStructures::ProhibitDeepNests 1.132
Perl::Critic::Policy::ControlStructures::ProhibitLabelsWithSpecialBlockNames 1.132
Perl::Critic::Policy::ControlStructures::ProhibitMutatingListFunctions 1.132
Perl::Critic::Policy::ControlStructures::ProhibitNegativeExpressionsInUnlessAndUntilConditions 1.132
Perl::Critic::Policy::ControlStructures::ProhibitPostfixControls 1.132
Perl::Critic::Policy::ControlStructures::ProhibitUnlessBlocks 1.132
Perl::Critic::Policy::ControlStructures::ProhibitUnreachableCode 1.132
Perl::Critic::Policy::ControlStructures::ProhibitUntilBlocks 1.132
Perl::Critic::Policy::ControlStructures::ProhibitYadaOperator 1.132
Perl::Critic::Policy::Documentation::PodSpelling 1.132
Perl::Critic::Policy::Documentation::RequirePackageMatchesPodName 1.132
Perl::Critic::Policy::Documentation::RequirePodAtEnd 1.132
Perl::Critic::Policy::Documentation::RequirePodLinksIncludeText 1.132
Perl::Critic::Policy::Documentation::RequirePodSections 1.132
Perl::Critic::Policy::ErrorHandling::RequireCarping 1.132
Perl::Critic::Policy::ErrorHandling::RequireCheckingReturnValueOfEval 1.132
Perl::Critic::Policy::InputOutput::ProhibitBacktickOperators 1.132
Perl::Critic::Policy::InputOutput::ProhibitBarewordFileHandles 1.132
Perl::Critic::Policy::InputOutput::ProhibitExplicitStdin 1.132
Perl::Critic::Policy::InputOutput::ProhibitInteractiveTest 1.132
Perl::Critic::Policy::InputOutput::ProhibitJoinedReadline 1.132
Perl::Critic::Policy::InputOutput::ProhibitOneArgSelect 1.132
Perl::Critic::Policy::InputOutput::ProhibitReadlineInForLoop 1.132
Perl::Critic::Policy::InputOutput::ProhibitTwoArgOpen 1.132
Perl::Critic::Policy::InputOutput::RequireBracedFileHandleWithPrint 1.132
Perl::Critic::Policy::InputOutput::RequireBriefOpen 1.132
Perl::Critic::Policy::InputOutput::RequireCheckedClose 1.132
Perl::Critic::Policy::InputOutput::RequireCheckedOpen 1.132
Perl::Critic::Policy::InputOutput::RequireCheckedSyscalls 1.132
Perl::Critic::Policy::InputOutput::RequireEncodingWithUTF8Layer 1.132
Perl::Critic::Policy::Miscellanea::ProhibitFormats 1.132
Perl::Critic::Policy::Miscellanea::ProhibitTies 1.132
Perl::Critic::Policy::Miscellanea::ProhibitUnrestrictedNoCritic 1.132
Perl::Critic::Policy::Miscellanea::ProhibitUselessNoCritic 1.132
Perl::Critic::Policy::Modules::ProhibitAutomaticExportation 1.132
Perl::Critic::Policy::Modules::ProhibitConditionalUseStatements 1.132
Perl::Critic::Policy::Modules::ProhibitEvilModules 1.132
Perl::Critic::Policy::Modules::ProhibitExcessMainComplexity 1.132
Perl::Critic::Policy::Modules::ProhibitMultiplePackages 1.132
Perl::Critic::Policy::Modules::RequireBarewordIncludes 1.132
Perl::Critic::Policy::Modules::RequireEndWithOne 1.132
Perl::Critic::Policy::Modules::RequireExplicitPackage 1.132
Perl::Critic::Policy::Modules::RequireFilenameMatchesPackage 1.132
Perl::Critic::Policy::Modules::RequireNoMatchVarsWithUseEnglish 1.132
Perl::Critic::Policy::Modules::RequireVersionVar 1.132
Perl::Critic::Policy::NamingConventions::Capitalization 1.132
Perl::Critic::Policy::NamingConventions::ProhibitAmbiguousNames 1.132
Perl::Critic::Policy::Objects::ProhibitIndirectSyntax 1.132
Perl::Critic::Policy::References::ProhibitDoubleSigils 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitCaptureWithoutTest 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitComplexRegexes 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitEnumeratedClasses 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitEscapedMetacharacters 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitFixedStringMatches 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitSingleCharAlternation 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitUnusedCapture 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitUnusualDelimiters 1.132
Perl::Critic::Policy::RegularExpressions::ProhibitUselessTopic 1.132
Perl::Critic::Policy::RegularExpressions::RequireBracesForMultiline 1.132
Perl::Critic::Policy::RegularExpressions::RequireDotMatchAnything 1.132
Perl::Critic::Policy::RegularExpressions::RequireExtendedFormatting 1.132
Perl::Critic::Policy::RegularExpressions::RequireLineBoundaryMatching 1.132
Perl::Critic::Policy::Subroutines::ProhibitAmpersandSigils 1.132
Perl::Critic::Policy::Subroutines::ProhibitBuiltinHomonyms 1.132
Perl::Critic::Policy::Subroutines::ProhibitExcessComplexity 1.132
Perl::Critic::Policy::Subroutines::ProhibitExplicitReturnUndef 1.132
Perl::Critic::Policy::Subroutines::ProhibitManyArgs 1.132
Perl::Critic::Policy::Subroutines::ProhibitNestedSubs 1.132
Perl::Critic::Policy::Subroutines::ProhibitReturnSort 1.132
Perl::Critic::Policy::Subroutines::ProhibitSubroutinePrototypes 1.132
Perl::Critic::Policy::Subroutines::ProhibitUnusedPrivateSubroutines 1.132
Perl::Critic::Policy::Subroutines::ProtectPrivateSubs 1.132
Perl::Critic::Policy::Subroutines::RequireArgUnpacking 1.132
Perl::Critic::Policy::Subroutines::RequireFinalReturn 1.132
Perl::Critic::Policy::TestingAndDebugging::ProhibitNoStrict 1.132
Perl::Critic::Policy::TestingAndDebugging::ProhibitNoWarnings 1.132
Perl::Critic::Policy::TestingAndDebugging::ProhibitProlongedStrictureOverride 1.132
Perl::Critic::Policy::TestingAndDebugging::RequireTestLabels 1.132
Perl::Critic::Policy::TestingAndDebugging::RequireUseStrict 1.132
Perl::Critic::Policy::TestingAndDebugging::RequireUseWarnings 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitCommaSeparatedStatements 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitComplexVersion 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitConstantPragma 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitEmptyQuotes 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitEscapedCharacters 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitImplicitNewlines 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitInterpolationOfLiterals 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitLeadingZeros 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitLongChainsOfMethodCalls 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitMagicNumbers 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitMismatchedOperators 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitMixedBooleanOperators 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitNoisyQuotes 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitQuotesAsQuotelikeOperatorDelimiters 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitSpecialLiteralHeredocTerminator 1.132
Perl::Critic::Policy::ValuesAndExpressions::ProhibitVersionStrings 1.132
Perl::Critic::Policy::ValuesAndExpressions::RequireConstantVersion 1.132
Perl::Critic::Policy::ValuesAndExpressions::RequireInterpolationOfMetachars 1.132
Perl::Critic::Policy::ValuesAndExpressions::RequireNumberSeparators 1.132
Perl::Critic::Policy::ValuesAndExpressions::RequireQuotedHeredocTerminator 1.132
Perl::Critic::Policy::ValuesAndExpressions::RequireUpperCaseHeredocTerminator 1.132
Perl::Critic::Policy::Variables::ProhibitAugmentedAssignmentInDeclaration 1.132
Perl::Critic::Policy::Variables::ProhibitConditionalDeclarations 1.132
Perl::Critic::Policy::Variables::ProhibitEvilVariables 1.132
Perl::Critic::Policy::Variables::ProhibitLocalVars 1.132
Perl::Critic::Policy::Variables::ProhibitMatchVars 1.132
Perl::Critic::Policy::Variables::ProhibitPackageVars 1.132
Perl::Critic::Policy::Variables::ProhibitPerl4PackageNames 1.132
Perl::Critic::Policy::Variables::ProhibitPunctuationVars 1.132
Perl::Critic::Policy::Variables::ProhibitReusedNames 1.132
Perl::Critic::Policy::Variables::ProhibitUnusedVariables 1.132
Perl::Critic::Policy::Variables::ProtectPrivateVars 1.132
Perl::Critic::Policy::Variables::RequireInitializationForLocalVars 1.132
Perl::Critic::Policy::Variables::RequireLexicalLoopIterators 1.132
Perl::Critic::Policy::Variables::RequireLocalizedPunctuationVars 1.132
Perl::Critic::Policy::Variables::RequireNegativeIndices 1.132
Perl::Critic::PolicyConfig 1.132
Perl::Critic::PolicyFactory 1.132
Perl::Critic::PolicyListing 1.132
Perl::Critic::PolicyParameter 1.132
Perl::Critic::PolicyParameter::Behavior 1.132
Perl::Critic::PolicyParameter::Behavior::Boolean 1.132
Perl::Critic::PolicyParameter::Behavior::Enumeration 1.132
Perl::Critic::PolicyParameter::Behavior::Integer 1.132
Perl::Critic::PolicyParameter::Behavior::String 1.132
Perl::Critic::PolicyParameter::Behavior::StringList 1.132
Perl::Critic::ProfilePrototype 1.132
Perl::Critic::Statistics 1.132
Perl::Critic::TestUtils 1.132
Perl::Critic::Theme 1.132
Perl::Critic::ThemeListing 1.132
Perl::Critic::UserProfile 1.132
Perl::Critic::Utils 1.132
Perl::Critic::Utils::Constants 1.132
Perl::Critic::Utils::DataConversion 1.132
Perl::Critic::Utils::McCabe 1.132
Perl::Critic::Utils::Perl 1.132
Perl::Critic::Utils::POD 1.132
Perl::Critic::Utils::POD::ParseInteriorSequence 1.132
Perl::Critic::Utils::PPI 1.132
Perl::Critic::Violation 1.132
Perl::PrereqScanner 1.023
Perl::PrereqScanner::Scanner 1.023
Perl::PrereqScanner::Scanner::Aliased 1.023
Perl::PrereqScanner::Scanner::Moose 1.023
Perl::PrereqScanner::Scanner::Perl5 1.023
Perl::PrereqScanner::Scanner::POE 1.023
Perl::PrereqScanner::Scanner::Superclass 1.023
Perl::PrereqScanner::Scanner::TestMore 1.023
Perl::Tidy unknown
Perl::Unsafe::Signals 0.03
Perl::Version 1.013
PerlIO::gzip 0.20
PerlIO::Layers 0.011
PerlIO::utf8_strict 0.007
PerlIO::via::Timeout 0.32
Pinto 0.14
Pinto::Action 0.14
Pinto::Action::Add 0.14
Pinto::Action::Clean 0.14
Pinto::Action::Copy 0.14
Pinto::Action::Default 0.14
Pinto::Action::Delete 0.14
Pinto::Action::Diff 0.14
Pinto::Action::Install 0.14
Pinto::Action::Kill 0.14
Pinto::Action::List 0.14
Pinto::Action::Lock 0.14
Pinto::Action::Log 0.14
Pinto::Action::Look 0.14
Pinto::Action::Merge 0.14
Pinto::Action::New 0.14
Pinto::Action::Nop 0.14
Pinto::Action::Pin 0.14
Pinto::Action::Props 0.14
Pinto::Action::Pull 0.14
Pinto::Action::Register 0.14
Pinto::Action::Rename 0.14
Pinto::Action::Reset 0.14
Pinto::Action::Revert 0.14
Pinto::Action::Roots 0.14
Pinto::Action::Stacks 0.14
Pinto::Action::Statistics 0.14
Pinto::Action::Unlock 0.14
Pinto::Action::Unpin 0.14
Pinto::Action::Unregister 0.14
Pinto::Action::Update 0.14
Pinto::Action::Verify 0.14
Pinto::ArchiveUnpacker 0.14
Pinto::Chrome 0.14
Pinto::Chrome::Net 0.14
Pinto::Chrome::Term 0.14
Pinto::Config 0.14
Pinto::Constants 0.14
Pinto::Database 0.14
Pinto::Difference 0.14
Pinto::DifferenceEntry 0.14
Pinto::Editor 0.14
Pinto::Editor::Clip 0.14
Pinto::Editor::Edit 0.14
Pinto::Exception 0.14
Pinto::Globals 0.14
Pinto::IndexReader 0.14
Pinto::IndexWriter 0.14
Pinto::Initializer 0.14
Pinto::Locator 0.14
Pinto::Locator::Mirror 0.14
Pinto::Locator::Multiplex 0.14
Pinto::Locator::Stratopan 0.14
Pinto::Locker 0.14
Pinto::Migrator 0.14
Pinto::ModlistWriter 0.14
Pinto::PackageExtractor 0.14
Pinto::PrerequisiteWalker 0.14
Pinto::Remote 0.14
Pinto::Remote::Action 0.14
Pinto::Remote::Action::Add 0.14
Pinto::Remote::Action::Install 0.14
Pinto::Remote::Result 0.14
Pinto::Repository 0.14
Pinto::Result 0.14
Pinto::RevisionWalker 0.14
Pinto::Role::Committable 0.14
Pinto::Role::Installer 0.14
Pinto::Role::PauseConfig 0.14
Pinto::Role::Plated 0.14
Pinto::Role::Puller 0.14
Pinto::Role::Schema::Result 0.14
Pinto::Role::Transactional 0.14
Pinto::Role::UserAgent 0.14
Pinto::Schema 0.14
Pinto::Schema::Result::Ancestry 0.14
Pinto::Schema::Result::Distribution 0.14
Pinto::Schema::Result::Package 0.14
Pinto::Schema::Result::Prerequisite 0.14
Pinto::Schema::Result::Registration 0.14
Pinto::Schema::Result::Revision 0.14
Pinto::Schema::Result::Stack 0.14
Pinto::Schema::ResultSet::Distribution 0.14
Pinto::Schema::ResultSet::Package 0.14
Pinto::Schema::ResultSet::Registration 0.14
Pinto::Server 0.14
Pinto::Server::Responder 0.14
Pinto::Server::Responder::Action 0.14
Pinto::Server::Responder::File 0.14
Pinto::Server::Router 0.14
Pinto::Shell 0.14
Pinto::Statistics 0.14
Pinto::Store 0.14
Pinto::Target 0.14
Pinto::Target::Distribution 0.14
Pinto::Target::Package 0.14
Pinto::Types 0.14
Pinto::Util 0.14
Plack 1.0047
Plack::App::Cascade unknown
Plack::App::CGIBin unknown
Plack::App::Directory unknown
Plack::App::File unknown
Plack::App::PSGIBin unknown
Plack::App::URLMap unknown
Plack::App::WrapCGI unknown
Plack::Builder unknown
Plack::Component unknown
Plack::Handler unknown
Plack::Handler::Apache1 unknown
Plack::Handler::Apache2 unknown
Plack::Handler::Apache2::Registry unknown
Plack::Handler::CGI unknown
Plack::Handler::FCGI unknown
Plack::Handler::HTTP::Server::PSGI unknown
Plack::Handler::HTTP::Server::Simple 0.16
Plack::Handler::Standalone unknown
Plack::Handler::Starman unknown
Plack::HTTPParser unknown
Plack::HTTPParser::PP unknown
Plack::Loader unknown
Plack::Loader::Delayed unknown
Plack::Loader::Restarter unknown
Plack::Loader::Shotgun unknown
Plack::LWPish unknown
Plack::Middleware unknown
Plack::Middleware::AccessLog unknown
Plack::Middleware::AccessLog::Timed unknown
Plack::Middleware::Auth::Basic unknown
Plack::Middleware::BufferedStreaming unknown
Plack::Middleware::Chunked unknown
Plack::Middleware::Conditional unknown
Plack::Middleware::ConditionalGET unknown
Plack::Middleware::ContentLength unknown
Plack::Middleware::ContentMD5 unknown
Plack::Middleware::ErrorDocument unknown
Plack::Middleware::FixMissingBodyInRedirect 0.12
Plack::Middleware::Head unknown
Plack::Middleware::HTTPExceptions unknown
Plack::Middleware::IIS6ScriptNameFix unknown
Plack::Middleware::IIS7KeepAliveFix unknown
Plack::Middleware::JSONP unknown
Plack::Middleware::LighttpdScriptNameFix unknown
Plack::Middleware::Lint unknown
Plack::Middleware::Log4perl unknown
Plack::Middleware::LogDispatch unknown
Plack::Middleware::MethodOverride 0.20
Plack::Middleware::NullLogger unknown
Plack::Middleware::RearrangeHeaders unknown
Plack::Middleware::Recursive unknown
Plack::Middleware::Refresh unknown
Plack::Middleware::RemoveRedundantBody 0.06
Plack::Middleware::ReverseProxy 0.15
Plack::Middleware::Runtime unknown
Plack::Middleware::SimpleContentFilter unknown
Plack::Middleware::SimpleLogger unknown
Plack::Middleware::StackTrace unknown
Plack::Middleware::Static unknown
Plack::Middleware::XFramework unknown
Plack::Middleware::XSendfile unknown
Plack::MIME unknown
Plack::Request 1.0047
Plack::Request::Upload unknown
Plack::Response 1.0047
Plack::Runner unknown
Plack::TempBuffer unknown
Plack::Test unknown
Plack::Test::ExternalServer 0.02
Plack::Test::MockHTTP unknown
Plack::Test::Server unknown
Plack::Test::Suite unknown
Plack::Util unknown
Plack::Util::Accessor unknown
Pod::Coverage 0.23
Pod::Coverage::CountParents unknown
Pod::Coverage::ExportOnly unknown
Pod::Coverage::Moose 0.07
Pod::Coverage::Overloader unknown
Pod::Coverage::TrustPod 0.100005
Pod::Elemental 0.103004
Pod::Elemental::Autoblank 0.103004
Pod::Elemental::Autochomp 0.103004
Pod::Elemental::Command 0.103004
Pod::Elemental::Document 0.103004
Pod::Elemental::Element::Generic::Blank 0.103004
Pod::Elemental::Element::Generic::Command 0.103004
Pod::Elemental::Element::Generic::Nonpod 0.103004
Pod::Elemental::Element::Generic::Text 0.103004
Pod::Elemental::Element::Nested 0.103004
Pod::Elemental::Element::Pod5::Command 0.103004
Pod::Elemental::Element::Pod5::Data 0.103004
Pod::Elemental::Element::Pod5::Nonpod 0.103004
Pod::Elemental::Element::Pod5::Ordinary 0.103004
Pod::Elemental::Element::Pod5::Region 0.103004
Pod::Elemental::Element::Pod5::Verbatim 0.103004
Pod::Elemental::Flat 0.103004
Pod::Elemental::Node 0.103004
Pod::Elemental::Objectifier 0.103004
Pod::Elemental::Paragraph 0.103004
Pod::Elemental::Selectors 0.103004
Pod::Elemental::Transformer 0.103004
Pod::Elemental::Transformer::Gatherer 0.103004
Pod::Elemental::Transformer::Nester 0.103004
Pod::Elemental::Transformer::Pod5 0.103004
Pod::Elemental::Types 0.103004
Pod::Eventual 0.094001
Pod::Eventual::Simple 0.094001
Pod::Markdown 3.005
Pod::Perldoc::ToMarkdown 3.005
Pod::Readme unknown
Pod::Readme::Filter unknown
Pod::Readme::Plugin unknown
Pod::Readme::Plugin::changes unknown
Pod::Readme::Plugin::requires unknown
Pod::Readme::Plugin::version unknown
Pod::Readme::Types unknown
Pod::Spell 1.20
Pod::Wordlist 1.20
POE 1.367
POE::Component 1.367
POE::Component::Client::TCP 1.367
POE::Component::Server::TCP 1.367
POE::Driver 1.367
POE::Driver::SysRW 1.367
POE::Filter 1.367
POE::Filter::Block 1.367
POE::Filter::Grep 1.367
POE::Filter::HTTPD 1.367
POE::Filter::Line 1.367
POE::Filter::Map 1.367
POE::Filter::RecordBlock 1.367
POE::Filter::Reference 1.367
POE::Filter::Stackable 1.367
POE::Filter::Stream 1.367
POE::Kernel 1.367
POE::Loop 1.367
POE::Loop::IO_Poll 1.367
POE::Loop::PerlSignals 1.367
POE::Loop::Select 1.367
POE::NFA 1.367
POE::Pipe 1.367
POE::Pipe::OneWay 1.367
POE::Pipe::TwoWay 1.367
POE::Queue 1.367
POE::Queue::Array 1.367
POE::Resource 1.367
POE::Resource::Aliases 1.367
POE::Resource::Clock 1.367
POE::Resource::Events 1.367
POE::Resource::Extrefs 1.367
POE::Resource::FileHandles 1.367
POE::Resource::Sessions 1.367
POE::Resource::SIDs 1.367
POE::Resource::Signals 1.367
POE::Resources 1.367
POE::Session 1.367
POE::Test::DondeEstan 1.360
POE::Test::Loops 1.360
POE::Test::Sequence unknown
POE::Wheel 1.367
POE::Wheel::Curses 1.367
POE::Wheel::FollowTail 1.367
POE::Wheel::ListenAccept 1.367
POE::Wheel::ReadLine 1.367
POE::Wheel::ReadWrite 1.367
POE::Wheel::Run 1.367
POE::Wheel::SocketFactory 1.367
POSIX::strftime::Compiler 0.42
PostScript::Document 0.06
PostScript::Elements unknown
PostScript::Metrics 0.06
PostScript::TextBlock 0.06
PPI 1.236
PPI::Cache 1.236
PPI::Document 1.236
PPI::Document::File 1.236
PPI::Document::Fragment 1.236
PPI::Document::Normalized 1.236
PPI::Dumper 1.236
PPI::Element 1.236
PPI::Exception 1.236
PPI::Exception::ParserRejection 1.236
PPI::Find 1.236
PPI::HTML 1.08
PPI::HTML::Fragment 1.08
PPI::Lexer 1.236
PPI::Node 1.236
PPI::Normal 1.236
PPI::Normal::Standard 1.236
PPI::Statement 1.236
PPI::Statement::Break 1.236
PPI::Statement::Compound 1.236
PPI::Statement::Data 1.236
PPI::Statement::End 1.236
PPI::Statement::Expression 1.236
PPI::Statement::Given 1.236
PPI::Statement::Include 1.236
PPI::Statement::Include::Perl6 1.236
PPI::Statement::Null 1.236
PPI::Statement::Package 1.236
PPI::Statement::Scheduled 1.236
PPI::Statement::Sub 1.236
PPI::Statement::Unknown 1.236
PPI::Statement::UnmatchedBrace 1.236
PPI::Statement::Variable 1.236
PPI::Statement::When 1.236
PPI::Structure 1.236
PPI::Structure::Block 1.236
PPI::Structure::Condition 1.236
PPI::Structure::Constructor 1.236
PPI::Structure::For 1.236
PPI::Structure::Given 1.236
PPI::Structure::List 1.236
PPI::Structure::Subscript 1.236
PPI::Structure::Unknown 1.236
PPI::Structure::When 1.236
PPI::Token 1.236
PPI::Token::_QuoteEngine 1.236
PPI::Token::_QuoteEngine::Full 1.236
PPI::Token::_QuoteEngine::Simple 1.236
PPI::Token::ArrayIndex 1.236
PPI::Token::Attribute 1.236
PPI::Token::BOM 1.236
PPI::Token::Cast 1.236
PPI::Token::Comment 1.236
PPI::Token::DashedWord 1.236
PPI::Token::Data 1.236
PPI::Token::End 1.236
PPI::Token::HereDoc 1.236
PPI::Token::Label 1.236
PPI::Token::Magic 1.236
PPI::Token::Number 1.236
PPI::Token::Number::Binary 1.236
PPI::Token::Number::Exp 1.236
PPI::Token::Number::Float 1.236
PPI::Token::Number::Hex 1.236
PPI::Token::Number::Octal 1.236
PPI::Token::Number::Version 1.236
PPI::Token::Operator 1.236
PPI::Token::Pod 1.236
PPI::Token::Prototype 1.236
PPI::Token::Quote 1.236
PPI::Token::Quote::Double 1.236
PPI::Token::Quote::Interpolate 1.236
PPI::Token::Quote::Literal 1.236
PPI::Token::Quote::Single 1.236
PPI::Token::QuoteLike 1.236
PPI::Token::QuoteLike::Backtick 1.236
PPI::Token::QuoteLike::Command 1.236
PPI::Token::QuoteLike::Readline 1.236
PPI::Token::QuoteLike::Regexp 1.236
PPI::Token::QuoteLike::Words 1.236
PPI::Token::Regexp 1.236
PPI::Token::Regexp::Match 1.236
PPI::Token::Regexp::Substitute 1.236
PPI::Token::Regexp::Transliterate 1.236
PPI::Token::Separator 1.236
PPI::Token::Structure 1.236
PPI::Token::Symbol 1.236
PPI::Token::Unknown 1.236
PPI::Token::Whitespace 1.236
PPI::Token::Word 1.236
PPI::Tokenizer 1.236
PPI::Transform 1.236
PPI::Transform::UpdateCopyright 1.236
PPI::Util 1.236
PPI::XSAccessor 1.236
PPIx::QuoteLike 0.006
PPIx::QuoteLike::Constant 0.006
PPIx::QuoteLike::Dumper 0.006
PPIx::QuoteLike::Token 0.006
PPIx::QuoteLike::Token::Control 0.006
PPIx::QuoteLike::Token::Delimiter 0.006
PPIx::QuoteLike::Token::Interpolation 0.006
PPIx::QuoteLike::Token::String 0.006
PPIx::QuoteLike::Token::Structure 0.006
PPIx::QuoteLike::Token::Unknown 0.006
PPIx::QuoteLike::Token::Whitespace 0.006
PPIx::QuoteLike::Utils 0.006
PPIx::Regexp 0.061
PPIx::Regexp::Constant 0.061
PPIx::Regexp::Dumper 0.061
PPIx::Regexp::Element 0.061
PPIx::Regexp::Lexer 0.061
PPIx::Regexp::Node 0.061
PPIx::Regexp::Node::Range 0.061
PPIx::Regexp::Node::Unknown 0.061
PPIx::Regexp::StringTokenizer 0.061
PPIx::Regexp::Structure 0.061
PPIx::Regexp::Structure::Assertion 0.061
PPIx::Regexp::Structure::BranchReset 0.061
PPIx::Regexp::Structure::Capture 0.061
PPIx::Regexp::Structure::CharClass 0.061
PPIx::Regexp::Structure::Code 0.061
PPIx::Regexp::Structure::Main 0.061
PPIx::Regexp::Structure::Modifier 0.061
PPIx::Regexp::Structure::NamedCapture 0.061
PPIx::Regexp::Structure::Quantifier 0.061
PPIx::Regexp::Structure::Regexp 0.061
PPIx::Regexp::Structure::RegexSet 0.061
PPIx::Regexp::Structure::Replacement 0.061
PPIx::Regexp::Structure::Subexpression 0.061
PPIx::Regexp::Structure::Switch 0.061
PPIx::Regexp::Structure::Unknown 0.061
PPIx::Regexp::Support 0.061
PPIx::Regexp::Token 0.061
PPIx::Regexp::Token::Assertion 0.061
PPIx::Regexp::Token::Backreference 0.061
PPIx::Regexp::Token::Backtrack 0.061
PPIx::Regexp::Token::CharClass 0.061
PPIx::Regexp::Token::CharClass::POSIX 0.061
PPIx::Regexp::Token::CharClass::POSIX::Unknown 0.061
PPIx::Regexp::Token::CharClass::Simple 0.061
PPIx::Regexp::Token::Code 0.061
PPIx::Regexp::Token::Comment 0.061
PPIx::Regexp::Token::Condition 0.061
PPIx::Regexp::Token::Control 0.061
PPIx::Regexp::Token::Delimiter 0.061
PPIx::Regexp::Token::Greediness 0.061
PPIx::Regexp::Token::GroupType 0.061
PPIx::Regexp::Token::GroupType::Assertion 0.061
PPIx::Regexp::Token::GroupType::BranchReset 0.061
PPIx::Regexp::Token::GroupType::Code 0.061
PPIx::Regexp::Token::GroupType::Modifier 0.061
PPIx::Regexp::Token::GroupType::NamedCapture 0.061
PPIx::Regexp::Token::GroupType::Subexpression 0.061
PPIx::Regexp::Token::GroupType::Switch 0.061
PPIx::Regexp::Token::Interpolation 0.061
PPIx::Regexp::Token::Literal 0.061
PPIx::Regexp::Token::Modifier 0.061
PPIx::Regexp::Token::NoOp 0.061
PPIx::Regexp::Token::Operator 0.061
PPIx::Regexp::Token::Quantifier 0.061
PPIx::Regexp::Token::Recursion 0.061
PPIx::Regexp::Token::Reference 0.061
PPIx::Regexp::Token::Structure 0.061
PPIx::Regexp::Token::Unknown 0.061
PPIx::Regexp::Token::Unmatched 0.061
PPIx::Regexp::Token::Whitespace 0.061
PPIx::Regexp::Tokenizer 0.061
PPIx::Regexp::Util 0.061
PPIx::Utilities 1.001000
PPIx::Utilities::Exception::Bug 1.001000
PPIx::Utilities::Node 1.001000
PPIx::Utilities::Statement 1.001000
Proc::Fork 0.804
Proc::Terminator::Ctx 0.05
RDF::Trine 1.019
RDF::Trine::Error 1.019
RDF::Trine::Exporter::CSV 1.019
RDF::Trine::Exporter::RDFPatch 1.019
RDF::Trine::Graph 1.019
RDF::Trine::Iterator 1.019
RDF::Trine::Iterator::Bindings 1.019
RDF::Trine::Iterator::Bindings::Materialized 1.019
RDF::Trine::Iterator::Boolean 1.019
RDF::Trine::Iterator::Graph 1.019
RDF::Trine::Iterator::Graph::Materialized 1.019
RDF::Trine::Iterator::JSONHandler 1.019
RDF::Trine::Iterator::SAXHandler 1.019
RDF::Trine::Model 1.019
RDF::Trine::Model::Dataset 1.019
RDF::Trine::Model::StatementFilter 1.019
RDF::Trine::Model::Union 1.019
RDF::Trine::Namespace 1.019
RDF::Trine::NamespaceMap 1.019
RDF::Trine::Node 1.019
RDF::Trine::Node::Blank 1.019
RDF::Trine::Node::Literal 1.019
RDF::Trine::Node::Nil 1.019
RDF::Trine::Node::Resource 1.019
RDF::Trine::Node::Variable 1.019
RDF::Trine::Parser 1.019
RDF::Trine::Parser::LineProtocol 1.019
RDF::Trine::Parser::NQuads 1.019
RDF::Trine::Parser::NTriples 1.019
RDF::Trine::Parser::RDFa 1.019
RDF::Trine::Parser::RDFJSON 1.019
RDF::Trine::Parser::RDFPatch 1.019
RDF::Trine::Parser::RDFXML 1.019
RDF::Trine::Parser::Redland 1.019
RDF::Trine::Parser::TriG 1.019
RDF::Trine::Parser::Turtle 1.019
RDF::Trine::Parser::Turtle::Constants 1.019
RDF::Trine::Parser::Turtle::Lexer 1.019
RDF::Trine::Parser::Turtle::Token unknown
RDF::Trine::Pattern 1.019
RDF::Trine::Serializer 1.019
RDF::Trine::Serializer::NQuads 1.019
RDF::Trine::Serializer::NTriples 1.019
RDF::Trine::Serializer::NTriples::Canonical 1.019
RDF::Trine::Serializer::RDFJSON 1.019
RDF::Trine::Serializer::RDFPatch 1.019
RDF::Trine::Serializer::RDFXML 1.019
RDF::Trine::Serializer::TriG 1.019
RDF::Trine::Serializer::TSV 1.019
RDF::Trine::Serializer::Turtle 1.019
RDF::Trine::Statement 1.019
RDF::Trine::Statement::Quad 1.019
RDF::Trine::Store 1.019
RDF::Trine::Store::DBI 1.019
RDF::Trine::Store::DBI::mysql 1.019
RDF::Trine::Store::DBI::Pg 1.019
RDF::Trine::Store::DBI::SQLite 1.019
RDF::Trine::Store::Dydra 1.019
RDF::Trine::Store::Hexastore 1.019
RDF::Trine::Store::LanguagePreference 1.019
RDF::Trine::Store::Memory 1.019
RDF::Trine::Store::Redis 1.019
RDF::Trine::Store::Redland 1.019
RDF::Trine::Store::SPARQL 1.019
RDF::Trine::VariableBindings 1.019
re::engine::RE2 0.13
Readonly 2.05
Redis 1.991
Redis::Hash 1.991
Redis::List 1.991
Redis::Sentinel 1.991
Ref::Util 0.204
Ref::Util::PP 0.204
Ref::Util::XS 0.117
Regexp::Common 2017060201
Regexp::Common::_support 2017060201
Regexp::Common::balanced 2017060201
Regexp::Common::CC 2017060201
Regexp::Common::comment 2017060201
Regexp::Common::delimited 2017060201
Regexp::Common::lingua 2017060201
Regexp::Common::list 2017060201
Regexp::Common::net 2017060201
Regexp::Common::number 2017060201
Regexp::Common::profanity 2017060201
Regexp::Common::SEN 2017060201
Regexp::Common::URI 2017060201
Regexp::Common::URI::fax 2017060201
Regexp::Common::URI::file 2017060201
Regexp::Common::URI::ftp 2017060201
Regexp::Common::URI::gopher 2017060201
Regexp::Common::URI::http 2017060201
Regexp::Common::URI::news 2017060201
Regexp::Common::URI::pop 2017060201
Regexp::Common::URI::prospero 2017060201
Regexp::Common::URI::RFC1035 2017060201
Regexp::Common::URI::RFC1738 2017060201
Regexp::Common::URI::RFC1808 2017060201
Regexp::Common::URI::RFC2384 2017060201
Regexp::Common::URI::RFC2396 2017060201
Regexp::Common::URI::RFC2806 2017060201
Regexp::Common::URI::tel 2017060201
Regexp::Common::URI::telnet 2017060201
Regexp::Common::URI::tv 2017060201
Regexp::Common::URI::wais 2017060201
Regexp::Common::whitespace 2017060201
Regexp::Common::zip 2017060201
Reply::Plugin::TypeTiny 1.002002
Return::MultiLevel 0.05
rlib 0.02
Role::HasMessage 0.006
Role::HasMessage::Errf 0.006
Role::Identifiable::HasIdent 0.007
Role::Identifiable::HasTags 0.007
Role::Tiny 2.000006
Role::Tiny::With 2.000006
Router::Simple 0.17
Router::Simple::Declare unknown
Router::Simple::Route unknown
Router::Simple::SubMapper unknown
Safe::Isa 1.000010
Scalar::Util 1.50
Scalar::Util::Numeric 0.40
Scope::Guard 0.21
Scope::Upper 0.30
Set::Infinite 0.65
Set::Infinite::_recurrence unknown
Set::Infinite::Arithmetic unknown
Set::Infinite::Basic unknown
Set::IntervalTree 0.12
Set::IntSpan 1.19
Set::Object::Weak unknown
Set::Scalar 1.29
Set::Scalar::Base 1.29
Set::Scalar::Null 1.29
Set::Scalar::Real 1.29
Set::Scalar::Universe 1.29
Set::Scalar::Valued 1.29
Set::Scalar::ValuedUniverse 1.29
Set::Scalar::Virtual 1.29
Slurp 0.4
Smart::Comments 1.06
SOAP::Constants 1.27
SOAP::Lite 1.27
SOAP::Lite::Deserializer::XMLSchema1999 1.27
SOAP::Lite::Deserializer::XMLSchema2001 1.27
SOAP::Lite::Deserializer::XMLSchemaSOAP1_1 1.27
SOAP::Lite::Deserializer::XMLSchemaSOAP1_2 1.27
SOAP::Lite::Packager 1.27
SOAP::Lite::Utils 1.27
SOAP::Packager 1.27
SOAP::Test 1.27
SOAP::Transport::HTTP 1.27
SOAP::Transport::IO 1.27
SOAP::Transport::LOCAL 1.27
SOAP::Transport::LOOPBACK 1.27
SOAP::Transport::MAILTO 1.27
SOAP::Transport::POP3 1.27
SOAP::Transport::TCP 1.27
Socket::GetAddrInfo 0.22
Socket::GetAddrInfo::Core 0.22
Socket::GetAddrInfo::Emul 0.22
Socket::GetAddrInfo::Socket6api 0.22
Socket::GetAddrInfo::Strict 0.22
Socket::GetAddrInfo::XS 0.22
Software::License 0.103013
Software::License::AGPL_3 0.103013
Software::License::Apache_1_1 0.103013
Software::License::Apache_2_0 0.103013
Software::License::Artistic_1_0 0.103013
Software::License::Artistic_2_0 0.103013
Software::License::BSD 0.103013
Software::License::CC0_1_0 0.103013
Software::License::Custom 0.103013
Software::License::EUPL_1_1 0.103013
Software::License::EUPL_1_2 0.103013
Software::License::FreeBSD 0.103013
Software::License::GFDL_1_2 0.103013
Software::License::GFDL_1_3 0.103013
Software::License::GPL_1 0.103013
Software::License::GPL_2 0.103013
Software::License::GPL_3 0.103013
Software::License::LGPL_2_1 0.103013
Software::License::LGPL_3_0 0.103013
Software::License::MIT 0.103013
Software::License::Mozilla_1_0 0.103013
Software::License::Mozilla_1_1 0.103013
Software::License::Mozilla_2_0 0.103013
Software::License::None 0.103013
Software::License::OpenSSL 0.103013
Software::License::Perl_5 0.103013
Software::License::PostgreSQL 0.103013
Software::License::QPL_1_0 0.103013
Software::License::SSLeay 0.103013
Software::License::Sun 0.103013
Software::License::Zlib 0.103013
Software::LicenseUtils 0.103013
Sort::Naturally 1.03
Specio 0.42
Specio::Coercion 0.42
Specio::Constraint::AnyCan 0.42
Specio::Constraint::AnyDoes 0.42
Specio::Constraint::AnyIsa 0.42
Specio::Constraint::Enum 0.42
Specio::Constraint::Intersection 0.42
Specio::Constraint::ObjectCan 0.42
Specio::Constraint::ObjectDoes 0.42
Specio::Constraint::ObjectIsa 0.42
Specio::Constraint::Parameterizable 0.42
Specio::Constraint::Parameterized 0.42
Specio::Constraint::Role::CanType 0.42
Specio::Constraint::Role::DoesType 0.42
Specio::Constraint::Role::Interface 0.42
Specio::Constraint::Role::IsaType 0.42
Specio::Constraint::Simple 0.42
Specio::Constraint::Structurable 0.42
Specio::Constraint::Structured 0.42
Specio::Constraint::Union 0.42
Specio::Declare 0.42
Specio::DeclaredAt 0.42
Specio::Exception 0.42
Specio::Exporter 0.42
Specio::Helpers 0.42
Specio::Library::Builtins 0.42
Specio::Library::Numeric 0.42
Specio::Library::Perl 0.42
Specio::Library::String 0.42
Specio::Library::Structured 0.42
Specio::Library::Structured::Dict 0.42
Specio::Library::Structured::Map 0.42
Specio::Library::Structured::Tuple 0.42
Specio::OO 0.42
Specio::PartialDump 0.42
Specio::Registry 0.42
Specio::Role::Inlinable 0.42
Specio::Subs 0.42
Specio::TypeChecks 0.42
Spiffy 0.46
Spiffy::mixin unknown
Spreadsheet::ParseExcel 0.65
Spreadsheet::ParseExcel::Cell 0.65
Spreadsheet::ParseExcel::Dump 0.65
Spreadsheet::ParseExcel::FmtDefault 0.65
Spreadsheet::ParseExcel::FmtJapan 0.65
Spreadsheet::ParseExcel::FmtJapan2 0.65
Spreadsheet::ParseExcel::FmtUnicode 0.65
Spreadsheet::ParseExcel::Font 0.65
Spreadsheet::ParseExcel::Format 0.65
Spreadsheet::ParseExcel::SaveParser 0.65
Spreadsheet::ParseExcel::SaveParser::Workbook 0.65
Spreadsheet::ParseExcel::SaveParser::Worksheet 0.65
Spreadsheet::ParseExcel::Simple 1.04
Spreadsheet::ParseExcel::Utility 0.65
Spreadsheet::ParseExcel::Workbook 0.65
Spreadsheet::ParseExcel::Worksheet 0.65
Spreadsheet::WriteExcel 2.40
Spreadsheet::WriteExcel::BIFFwriter 2.40
Spreadsheet::WriteExcel::Big 2.40
Spreadsheet::WriteExcel::Chart 2.40
Spreadsheet::WriteExcel::Chart::Area 2.40
Spreadsheet::WriteExcel::Chart::Bar 2.40
Spreadsheet::WriteExcel::Chart::Column 2.40
Spreadsheet::WriteExcel::Chart::External 2.40
Spreadsheet::WriteExcel::Chart::Line 2.40
Spreadsheet::WriteExcel::Chart::Pie 2.40
Spreadsheet::WriteExcel::Chart::Scatter 2.40
Spreadsheet::WriteExcel::Chart::Stock 2.40
Spreadsheet::WriteExcel::Examples 2.40
Spreadsheet::WriteExcel::Format 2.40
Spreadsheet::WriteExcel::Formula 2.40
Spreadsheet::WriteExcel::OLEwriter 2.40
Spreadsheet::WriteExcel::Properties 2.40
Spreadsheet::WriteExcel::Simple 1.04
Spreadsheet::WriteExcel::Utility 2.40
Spreadsheet::WriteExcel::Workbook 2.40
Spreadsheet::WriteExcel::Worksheet 2.40
SQL::Abstract 1.86
SQL::Abstract::Test unknown
SQL::Abstract::Tree unknown
SQL::Translator 0.11024
SQL::Translator::Diff unknown
SQL::Translator::Filter::DefaultExtra 1.59
SQL::Translator::Filter::Globals 1.59
SQL::Translator::Filter::Names 1.59
SQL::Translator::Generator::DDL::MySQL unknown
SQL::Translator::Generator::DDL::PostgreSQL unknown
SQL::Translator::Generator::DDL::SQLite unknown
SQL::Translator::Generator::DDL::SQLServer unknown
SQL::Translator::Generator::Role::DDL unknown
SQL::Translator::Generator::Role::Quote unknown
SQL::Translator::Parser 1.60
SQL::Translator::Parser::Access 1.59
SQL::Translator::Parser::DB2 unknown
SQL::Translator::Parser::DB2::Grammar unknown
SQL::Translator::Parser::DBI 1.59
SQL::Translator::Parser::DBI::DB2 1.59
SQL::Translator::Parser::DBI::MySQL 1.59
SQL::Translator::Parser::DBI::Oracle 1.59
SQL::Translator::Parser::DBI::PostgreSQL 1.59
SQL::Translator::Parser::DBI::SQLite 1.59
SQL::Translator::Parser::DBI::SQLServer 1.59
SQL::Translator::Parser::DBI::Sybase 1.59
SQL::Translator::Parser::DBIx::Class 1.10
SQL::Translator::Parser::Excel 1.59
SQL::Translator::Parser::JSON 1.00
SQL::Translator::Parser::MySQL 1.59
SQL::Translator::Parser::Oracle 1.59
SQL::Translator::Parser::PostgreSQL 1.59
SQL::Translator::Parser::SQLite 1.59
SQL::Translator::Parser::SQLServer 1.59
SQL::Translator::Parser::Storable 1.59
SQL::Translator::Parser::Sybase 1.59
SQL::Translator::Parser::XML 1.59
SQL::Translator::Parser::XML::SQLFairy 1.59
SQL::Translator::Parser::xSV 1.59
SQL::Translator::Parser::YAML 1.59
SQL::Translator::Producer 1.59
SQL::Translator::Producer::ClassDBI 1.59
SQL::Translator::Producer::DB2 1.59
SQL::Translator::Producer::DBIx::Class::File 0.1
SQL::Translator::Producer::Diagram 1.59
SQL::Translator::Producer::DiaUml 1.59
SQL::Translator::Producer::Dumper 1.59
SQL::Translator::Producer::GraphViz 1.59
SQL::Translator::Producer::HTML 1.59
SQL::Translator::Producer::JSON 1.00
SQL::Translator::Producer::Latex 1.59
SQL::Translator::Producer::MySQL 1.59
SQL::Translator::Producer::Oracle 1.59
SQL::Translator::Producer::POD 1.59
SQL::Translator::Producer::PostgreSQL 1.59
SQL::Translator::Producer::SQLite 1.59
SQL::Translator::Producer::SQLServer 1.59
SQL::Translator::Producer::Storable 1.59
SQL::Translator::Producer::Sybase 1.59
SQL::Translator::Producer::TT::Base 1.59
SQL::Translator::Producer::TT::Table 1.59
SQL::Translator::Producer::TTSchema 1.59
SQL::Translator::Producer::XML 1.59
SQL::Translator::Producer::XML::SQLFairy 1.59
SQL::Translator::Producer::YAML 1.59
SQL::Translator::Role::BuildArgs unknown
SQL::Translator::Role::Debug unknown
SQL::Translator::Role::Error unknown
SQL::Translator::Role::ListAttr unknown
SQL::Translator::Schema 1.59
SQL::Translator::Schema::Constants 1.59
SQL::Translator::Schema::Constraint 1.59
SQL::Translator::Schema::Field 1.59
SQL::Translator::Schema::Index 1.59
SQL::Translator::Schema::Object 1.59
SQL::Translator::Schema::Procedure 1.59
SQL::Translator::Schema::Role::Compare unknown
SQL::Translator::Schema::Role::Extra unknown
SQL::Translator::Schema::Table 1.59
SQL::Translator::Schema::Trigger 1.59
SQL::Translator::Schema::View 1.59
SQL::Translator::Types unknown
SQL::Translator::Utils 1.59
StackTrace::Auto 0.200013
Starman 0.4014
Starman::Server unknown
Statistics::ANOVA 0.14
Statistics::ANOVA::Compare 0.01
Statistics::ANOVA::EffectSize 0.02
Statistics::ANOVA::Friedman 0.02
Statistics::ANOVA::JT unknown
Statistics::ANOVA::KW 0.01
Statistics::ANOVA::Page 0.02
Statistics::Basic 1.6611
Statistics::Basic::_OneVectorBase unknown
Statistics::Basic::_TwoVectorBase unknown
Statistics::Basic::ComputedVector unknown
Statistics::Basic::Correlation unknown
Statistics::Basic::Covariance unknown
Statistics::Basic::LeastSquareFit unknown
Statistics::Basic::Mean unknown
Statistics::Basic::Median unknown
Statistics::Basic::Mode unknown
Statistics::Basic::StdDev unknown
Statistics::Basic::Variance unknown
Statistics::Basic::Vector unknown
Statistics::Candidates unknown
Statistics::ChiSquare 1.0000
Statistics::Contingency 0.09
Statistics::Cook 0.0.6
Statistics::Data 0.11
Statistics::Data::Dichotomize 0.05
Statistics::Data::Rank 0.02
Statistics::DependantTTest 0.03
Statistics::Descriptive 3.0701
Statistics::Descriptive::Discrete 0.07
Statistics::Descriptive::Full 3.0701
Statistics::Descriptive::LogScale 0.11
Statistics::Descriptive::Smoother 3.0701
Statistics::Descriptive::Smoother::Exponential 3.0701
Statistics::Descriptive::Smoother::Weightedexponential 3.0701
Statistics::Descriptive::Sparse 3.0701
Statistics::Discrete 0.05.00
Statistics::Distributions 1.02
Statistics::Distributions::Bartlett unknown
Statistics::Distributions::GTest unknown
Statistics::Diversity::Shannon 0.0102
Statistics::FactorAnalysis unknown
Statistics::FisherPitman 0.034
Statistics::Frequency 0.04
Statistics::Histogram 0.1
Statistics::KruskalWallis 0.01
Statistics::Lite 3.62
Statistics::MaxEntropy 1.0
Statistics::Normality 0.01
Statistics::PCA unknown
Statistics::PCA::Varimax unknown
Statistics::PointEstimation 1.1
Statistics::R 0.34
Statistics::R::Legacy unknown
Statistics::R::Win32 unknown
Statistics::RankCorrelation 0.1205
Statistics::Robust 0.02
Statistics::Robust::Bootstrap unknown
Statistics::Robust::Density unknown
Statistics::Robust::Location unknown
Statistics::Robust::Scale unknown
Statistics::Sampler::Multinomial 0.7
Statistics::Sampler::Multinomial::AliasMethod 0.7
Statistics::Sequences 0.15
Statistics::Sequences::Joins 0.20
Statistics::Sequences::Pot 0.12
Statistics::Sequences::Runs 0.22
Statistics::Sequences::Turns 0.13
Statistics::Sequences::Vnomes 0.20
Statistics::Shannon 0.05
Statistics::Simpson 0.03
Statistics::SparseVector 0.2
Statistics::Standard_Normal unknown
Statistics::TopK 0.02
Statistics::TTest 1.1
Statistics::Zed 0.10
Storable 3.11
Stream::Buffered 0.03
Stream::Buffered::Auto unknown
Stream::Buffered::File unknown
Stream::Buffered::PerlIO unknown
strictures 2.000005
strictures::extra unknown
String::Diff 0.07
String::Errf 0.008
String::Escape 2010.002
String::Flogger 1.101245
String::Format 1.18
String::Formatter 0.102084
String::Formatter::Cookbook 0.102084
String::Numeric 0.9
String::Numeric::PP 0.9
String::Print 0.93
String::RewritePrefix 0.007
String::ShellQuote 1.04
String::Tagged 0.15
String::Tagged::Terminal 0.02
String::ToIdentifier::EN 0.12
String::ToIdentifier::EN::Unicode 0.12
String::Truncate 1.100602
String::Util 1.26
Struct::Dumb 0.09
Sub::Attribute 0.06
Sub::Defer 2.001001
Sub::Exporter 0.987
Sub::Exporter::ForMethods 0.100052
Sub::Exporter::GlobExporter 0.005
Sub::Exporter::Progressive 0.001013
Sub::Exporter::Util 0.987
Sub::Identify 0.14
Sub::Info 0.002
Sub::Install 0.928
Sub::Name 0.21
Sub::Quote 2.001001
Sub::Uplevel 0.2800
Sub::Util 1.50
SUPER 1.20141117
SVG 2.84
SVG::DOM 2.84
SVG::Element 2.84
SVG::Extension 2.84
SVG::Graph 0.02
SVG::Graph::Data unknown
SVG::Graph::Data::Datum unknown
SVG::Graph::Data::Node unknown
SVG::Graph::Data::Tree unknown
SVG::Graph::File unknown
SVG::Graph::Frame unknown
SVG::Graph::Glyph unknown
SVG::Graph::Glyph::axis unknown
SVG::Graph::Glyph::bar unknown
SVG::Graph::Glyph::barflex unknown
SVG::Graph::Glyph::bezier unknown
SVG::Graph::Glyph::bubble unknown
SVG::Graph::Glyph::heatmap unknown
SVG::Graph::Glyph::line unknown
SVG::Graph::Glyph::pictogram unknown
SVG::Graph::Glyph::scatter unknown
SVG::Graph::Glyph::tree unknown
SVG::Graph::Glyph::wedge unknown
SVG::Graph::Group unknown
SVG::XML 2.84
Symbol::Util 0.0203
SymTab unknown
syntax 0.004
Syntax::Feature::Junction 0.003008
Syntax::Keyword::Junction 0.003008
Syntax::Keyword::Junction::All 0.003008
Syntax::Keyword::Junction::Any 0.003008
Syntax::Keyword::Junction::Base 0.003008
Syntax::Keyword::Junction::None 0.003008
Syntax::Keyword::Junction::One 0.003008
Sys::SigAction 0.23
Sys::SigAction::Alarm unknown
TAP::Base 3.42
TAP::Formatter::Base 3.42
TAP::Formatter::Color 3.42
TAP::Formatter::Console 3.42
TAP::Formatter::Console::ParallelSession 3.42
TAP::Formatter::Console::Session 3.42
TAP::Formatter::File 3.42
TAP::Formatter::File::Session 3.42
TAP::Formatter::Session 3.42
TAP::Harness 3.42
TAP::Harness::Env 3.42
TAP::Object 3.42
TAP::Parser 3.42
TAP::Parser::Aggregator 3.42
TAP::Parser::Grammar 3.42
TAP::Parser::Iterator 3.42
TAP::Parser::Iterator::Array 3.42
TAP::Parser::Iterator::Process 3.42
TAP::Parser::Iterator::Stream 3.42
TAP::Parser::IteratorFactory 3.42
TAP::Parser::Multiplexer 3.42
TAP::Parser::Result 3.42
TAP::Parser::Result::Bailout 3.42
TAP::Parser::Result::Comment 3.42
TAP::Parser::Result::Plan 3.42
TAP::Parser::Result::Pragma 3.42
TAP::Parser::Result::Test 3.42
TAP::Parser::Result::Unknown 3.42
TAP::Parser::Result::Version 3.42
TAP::Parser::Result::YAML 3.42
TAP::Parser::ResultFactory 3.42
TAP::Parser::Scheduler 3.42
TAP::Parser::Scheduler::Job 3.42
TAP::Parser::Scheduler::Spinner 3.42
TAP::Parser::Source 3.42
TAP::Parser::SourceHandler 3.42
TAP::Parser::SourceHandler::Executable 3.42
TAP::Parser::SourceHandler::File 3.42
TAP::Parser::SourceHandler::Handle 3.42
TAP::Parser::SourceHandler::Perl 3.42
TAP::Parser::SourceHandler::RawTAP 3.42
TAP::Parser::YAMLish::Reader 3.42
TAP::Parser::YAMLish::Writer 3.42
Task::Catalyst 4.02
Task::Kensho 0.39
Task::Kensho::Async 0.39
Task::Kensho::CLI 0.39
Task::Kensho::Config 0.39
Task::Kensho::Dates 0.39
Task::Kensho::DBDev 0.39
Task::Kensho::Email 0.39
Task::Kensho::ExcelCSV 0.39
Task::Kensho::Exceptions 0.39
Task::Kensho::Hackery 0.39
Task::Kensho::Logging 0.39
Task::Kensho::ModuleDev 0.39
Task::Kensho::OOP 0.39
Task::Kensho::Scalability 0.39
Task::Kensho::Testing 0.39
Task::Kensho::Toolchain 0.39
Task::Kensho::WebCrawling 0.39
Task::Kensho::WebDev 0.39
Task::Kensho::XML 0.39
Task::Moose 0.03
Task::Weaken 1.06
Template 2.27
Template::Base 2.78
Template::Config 2.75
Template::Constants 2.75
Template::Context 2.98
Template::Directive 2.2
Template::Document 2.79
Template::Exception 2.7
Template::Filters 2.87
Template::Grammar 2.26
Template::Iterator 2.68
Template::Namespace::Constants 1.27
Template::Parser 2.89
Template::Plugin 2.7
Template::Plugin::Assert 1
Template::Plugin::CGI 2.7
Template::Plugin::Datafile 2.72
Template::Plugin::Date 2.78
Template::Plugin::Directory 2.7
Template::Plugin::Dumper 2.7
Template::Plugin::File 2.71
Template::Plugin::Filter 1.38
Template::Plugin::Format 2.7
Template::Plugin::HTML 2.62
Template::Plugin::Image 1.21
Template::Plugin::Iterator 2.68
Template::Plugin::Math 1.16
Template::Plugin::Pod 2.69
Template::Plugin::Procedural 1.17
Template::Plugin::Scalar 1
Template::Plugin::String 2.4
Template::Plugin::Table 2.71
Template::Plugin::URL 2.74
Template::Plugin::View 2.68
Template::Plugin::Wrap 2.68
Template::Plugins 2.77
Template::Provider 2.94
Template::Service 2.8
Template::Stash 2.91
Template::Stash::Context 1.63
Template::Stash::XS unknown
Template::Test 2.75
Template::Timer 1.00
Template::Tiny 1.12
Template::Toolkit unknown
Template::View 2.91
Template::VMethods 2.16
Term::Encoding 0.02
Term::ProgressBar 2.22
Term::ProgressBar::IO 2.22
Term::ProgressBar::Quiet 0.31
Term::ProgressBar::Simple 0.03
Term::ReadKey 2.37
Term::ReadLine::Perl5 1.45
Term::ReadLine::Perl5::Common unknown
Term::ReadLine::Perl5::Dumb unknown
Term::ReadLine::Perl5::History unknown
Term::ReadLine::Perl5::Keymap unknown
Term::ReadLine::Perl5::OO 0.43
Term::ReadLine::Perl5::OO::History unknown
Term::ReadLine::Perl5::OO::Keymap unknown
Term::ReadLine::Perl5::OO::State unknown
Term::ReadLine::Perl5::readline 1.45
Term::ReadLine::Perl5::TermCap unknown
Term::ReadLine::Perl5::Tie 1.45
Term::Size 0.207
Term::Table 0.012
Term::Table::Cell 0.012
Term::Table::CellStack 0.012
Term::Table::HashBase 0.003
Term::Table::LineBreak 0.012
Term::Table::Spacer 0.012
Term::Table::Util 0.012
Term::UI 0.46
Term::UI::History 0.46
Test2 1.302138
Test2::API 1.302138
Test2::API::Breakage 1.302138
Test2::API::Context 1.302138
Test2::API::Instance 1.302138
Test2::API::Stack 1.302138
Test2::AsyncSubtest 0.000115
Test2::AsyncSubtest::Event::Attach 0.000115
Test2::AsyncSubtest::Event::Detach 0.000115
Test2::AsyncSubtest::Formatter 0.000115
Test2::AsyncSubtest::Hub 0.000115
Test2::Bundle 0.000115
Test2::Bundle::Extended 0.000115
Test2::Bundle::More 0.000115
Test2::Bundle::Simple 0.000115
Test2::Compare 0.000115
Test2::Compare::Array 0.000115
Test2::Compare::Bag 0.000115
Test2::Compare::Base 0.000115
Test2::Compare::Bool 0.000115
Test2::Compare::Custom 0.000115
Test2::Compare::DeepRef 0.000115
Test2::Compare::Delta 0.000115
Test2::Compare::Event 0.000115
Test2::Compare::EventMeta 0.000115
Test2::Compare::Float 0.000115
Test2::Compare::Hash 0.000115
Test2::Compare::Meta 0.000115
Test2::Compare::Negatable 0.000115
Test2::Compare::Number 0.000115
Test2::Compare::Object 0.000115
Test2::Compare::OrderedSubset 0.000115
Test2::Compare::Pattern 0.000115
Test2::Compare::Ref 0.000115
Test2::Compare::Regex 0.000115
Test2::Compare::Scalar 0.000115
Test2::Compare::Set 0.000115
Test2::Compare::String 0.000115
Test2::Compare::Undef 0.000115
Test2::Compare::Wildcard 0.000115
Test2::Event 1.302138
Test2::Event::Bail 1.302138
Test2::Event::Diag 1.302138
Test2::Event::Encoding 1.302138
Test2::Event::Exception 1.302138
Test2::Event::Fail 1.302138
Test2::Event::Generic 1.302138
Test2::Event::Note 1.302138
Test2::Event::Ok 1.302138
Test2::Event::Pass 1.302138
Test2::Event::Plan 1.302138
Test2::Event::Skip 1.302138
Test2::Event::Subtest 1.302138
Test2::Event::TAP::Version 1.302138
Test2::Event::Times 0.000115
Test2::Event::V2 1.302138
Test2::Event::Waiting 1.302138
Test2::Event::Warning 0.06
Test2::EventFacet 1.302138
Test2::EventFacet::About 1.302138
Test2::EventFacet::Amnesty 1.302138
Test2::EventFacet::Assert 1.302138
Test2::EventFacet::Control 1.302138
Test2::EventFacet::Error 1.302138
Test2::EventFacet::Hub 1.302138
Test2::EventFacet::Info 1.302138
Test2::EventFacet::Meta 1.302138
Test2::EventFacet::Parent 1.302138
Test2::EventFacet::Plan 1.302138
Test2::EventFacet::Render 1.302138
Test2::EventFacet::Trace 1.302138
Test2::Formatter 1.302138
Test2::Formatter::TAP 1.302138
Test2::Hub 1.302138
Test2::Hub::Interceptor 1.302138
Test2::Hub::Interceptor::Terminator 1.302138
Test2::Hub::Subtest 1.302138
Test2::IPC 1.302138
Test2::IPC::Driver 1.302138
Test2::IPC::Driver::Files 1.302138
Test2::Manual 0.000115
Test2::Manual::Anatomy 0.000115
Test2::Manual::Anatomy::API 0.000115
Test2::Manual::Anatomy::Context 0.000115
Test2::Manual::Anatomy::EndToEnd 0.000115
Test2::Manual::Anatomy::Event 0.000115
Test2::Manual::Anatomy::Hubs 0.000115
Test2::Manual::Anatomy::IPC 0.000115
Test2::Manual::Anatomy::Utilities 0.000115
Test2::Manual::Contributing 0.000115
Test2::Manual::Testing 0.000115
Test2::Manual::Testing::Introduction 0.000115
Test2::Manual::Testing::Migrating0.000115
Test2::Manual::Testing::Planning0.000115
Test2::Manual::Testing::Todo0.000115
Test2::Manual::Tooling0.000115
Test2::Manual::Tooling::FirstTool0.000115
Test2::Manual::Tooling::Formatter0.000115
Test2::Manual::Tooling::Nesting0.000115
Test2::Manual::Tooling::Plugin::TestExit0.000115
Test2::Manual::Tooling::Plugin::TestingDone0.000115
Test2::Manual::Tooling::Plugin::ToolCompletes0.000115
Test2::Manual::Tooling::Plugin::ToolStarts0.000115
Test2::Manual::Tooling::Subtest0.000115
Test2::Manual::Tooling::TestBuilder0.000115
Test2::Manual::Tooling::Testing0.000115
Test2::Mock0.000115
Test2::Plugin0.000115
Test2::Plugin::BailOnFail0.000115
Test2::Plugin::DieOnFail0.000115
Test2::Plugin::ExitSummary0.000115
Test2::Plugin::NoWarnings0.06
Test2::Plugin::SRand0.000115
Test2::Plugin::Times0.000115
Test2::Plugin::UTF80.000115
Test2::Require0.000115
Test2::Require::AuthorTesting0.000115
Test2::Require::EnvVar0.000115
Test2::Require::Fork0.000115
Test2::Require::Module0.000115
Test2::Require::Perl0.000115
Test2::Require::RealFork0.000115
Test2::Require::Threads0.000115
Test2::Suite0.000115
Test2::Todo0.000115
Test2::Tools0.000115
Test2::Tools::AsyncSubtest0.000115
Test2::Tools::Basic0.000115
Test2::Tools::Class0.000115
Test2::Tools::ClassicCompare0.000115
Test2::Tools::Compare0.000115
Test2::Tools::Defer0.000115
Test2::Tools::Encoding0.000115
Test2::Tools::Event0.000115
Test2::Tools::Exception0.000115
Test2::Tools::Exports0.000115
Test2::Tools::GenTemp0.000115
Test2::Tools::Grab0.000115
Test2::Tools::Mock0.000115
Test2::Tools::Ref0.000115
Test2::Tools::Spec0.000115
Test2::Tools::Subtest0.000115
Test2::Tools::Target0.000115
Test2::Tools::Tester0.000115
Test2::Tools::Tiny1.302138
Test2::Tools::Warnings0.000115
Test2::Util1.302138
Test2::Util::ExternalMeta1.302138
Test2::Util::Facets2Legacy1.302138
Test2::Util::Grabber0.000115
Test2::Util::HashBase1.302138
Test2::Util::Ref0.000115
Test2::Util::Stash0.000115
Test2::Util::Sub0.000115
Test2::Util::Table0.000115
Test2::Util::Table::Cell0.000115
Test2::Util::Table::LineBreak0.000115
Test2::Util::Term0.000115
Test2::Util::Times0.000115
Test2::Util::Trace1.302138
Test2::V00.000115
Test2::Workflow0.000115
Test2::Workflow::BlockBase0.000115
Test2::Workflow::Build0.000115
Test2::Workflow::Runner0.000115
Test2::Workflow::Task0.000115
Test2::Workflow::Task::Action0.000115
Test2::Workflow::Task::Group0.000115
Test::Assert0.0504
Test::Base0.89
Test::Base::Filterunknown
Test::Builder1.302138
Test::Builder::Formatter1.302138
Test::Builder::IO::Scalar2.114
Test::Builder::Module1.302138
Test::Builder::Tester1.302138
Test::Builder::Tester::Color1.302138
Test::Builder::TodoDiag1.302138
Test::Class0.50
Test::Class::Load0.50
Test::Class::MethodInfo0.50
Test::Class::Moose0.92
Test::Class::Moose::AttributeRegistry0.92
Test::Class::Moose::CLI0.92
Test::Class::Moose::Config0.92
Test::Class::Moose::Deprecated0.92
Test::Class::Moose::Executor::Parallel0.92
Test::Class::Moose::Executor::Sequential0.92
Test::Class::Moose::Load0.92
Test::Class::Moose::Report0.92
Test::Class::Moose::Report::Class0.92
Test::Class::Moose::Report::Instance0.92
Test::Class::Moose::Report::Method0.92
Test::Class::Moose::Report::Time0.92
Test::Class::Moose::Role0.92
Test::Class::Moose::Role::AutoUse0.92
Test::Class::Moose::Role::CLI0.92
Test::Class::Moose::Role::Executor0.92
Test::Class::Moose::Role::HasTimeReport0.92
Test::Class::Moose::Role::ParameterizedInstances0.92
Test::Class::Moose::Role::Reporting0.92
Test::Class::Moose::Runner0.92
Test::Class::Moose::Tutorial0.92
Test::Class::Moose::Util0.92
Test::CleanNamespaces0.23
Test::CPAN::Changes0.400002
Test::CPAN::Meta0.25
Test::CPAN::Meta::Version0.25
Test::Deep1.128
Test::Deep::Allunknown
Test::Deep::Anyunknown
Test::Deep::Arrayunknown
Test::Deep::ArrayEachunknown
Test::Deep::ArrayElementsOnlyunknown
Test::Deep::ArrayLengthunknown
Test::Deep::ArrayLengthOnlyunknown
Test::Deep::Blessedunknown
Test::Deep::Booleanunknown
Test::Deep::Cacheunknown
Test::Deep::Cache::Simpleunknown
Test::Deep::Classunknown
Test::Deep::Cmpunknown
Test::Deep::Codeunknown
Test::Deep::Hashunknown
Test::Deep::HashEachunknown
Test::Deep::HashElementsunknown
Test::Deep::HashKeysunknown
Test::Deep::HashKeysOnlyunknown
Test::Deep::Ignoreunknown
Test::Deep::Isaunknown
Test::Deep::JSON0.05
Test::Deep::ListMethodsunknown
Test::Deep::Methodsunknown
Test::Deep::MMunknown
Test::Deep::Noneunknown
Test::Deep::NoTestunknown
Test::Deep::Numberunknown
Test::Deep::Objunknown
Test::Deep::Refunknown
Test::Deep::RefTypeunknown
Test::Deep::Regexpunknown
Test::Deep::RegexpMatchesunknown
Test::Deep::RegexpOnlyunknown
Test::Deep::RegexpRefunknown
Test::Deep::RegexpRefOnlyunknown
Test::Deep::RegexpVersionunknown
Test::Deep::ScalarRefunknown
Test::Deep::ScalarRefOnlyunknown
Test::Deep::Setunknown
Test::Deep::Shallowunknown
Test::Deep::Stackunknown
Test::Deep::Stringunknown
Test::Deep::Type0.008
Test::Differences0.64
Test::EOL2.00
Test::Exception0.43
Test::Expect0.34
Test::FailWarnings0.008
Test::Fatal0.014
Test::File1.443
Test::File::ShareDir1.001002
Test::File::ShareDir::Dist1.001002
Test::File::ShareDir::Module1.001002
Test::File::ShareDir::Object::Dist1.001002
Test::File::ShareDir::Object::Inc1.001002
Test::File::ShareDir::Object::Module1.001002
Test::File::ShareDir::TempDirObject1.001002
Test::File::ShareDir::Utils1.001002
Test::Fork0.02
Test::Future0.38
Test::Harness3.42
Test::Identity0.01
Test::JSON0.11
Test::LeakTrace0.16
Test::LeakTrace::Scriptunknown
Test::LongString0.17
Test::LWP::UserAgent0.033
Test::Memory::Cycle1.06
Test::Mock::HTTP::Request0.01
Test::Mock::HTTP::Response0.01
Test::Mock::LWP0.08
Test::Mock::LWP::UserAgent0.01
Test::MockModule0.15
Test::MockObject1.20180705
Test::MockObject::Extends1.20180705
Test::MockTime0.17
Test::Mojounknown
Test::Moose2.2011
Test::MooseX::Daemonize0.21
Test::More1.302138
Test::More::UTF80.05
Test::Most0.35
Test::Most::Exception0.35
Test::Needs0.002005
Test::NoWarnings1.04
Test::NoWarnings::Warning1.04
Test::Number::Delta1.06
Test::Object0.08
Test::Object::Test0.08
Test::Output1.031
Test::Perl::Critic::Policy1.132
Test::Pod1.52
Test::Pod::Contentunknown
Test::Pod::Coverage1.10
Test::RDF::Trine::Store1.019
Test::Refcount0.08
Test::Requires0.10
Test::RequiresInternet0.05
Test::SharedFork0.35
Test::SharedFork::Arrayunknown
Test::SharedFork::Scalarunknown
Test::SharedFork::Storeunknown
Test::Simple1.302138
Test::Spec0.54
Test::Spec::Contextunknown
Test::Spec::Exampleunknown
Test::Spec::Mocksunknown
Test::Spec::SharedHashunknown
Test::Spec::TodoExampleunknown
Test::Specio0.42
Test::SQL::Translator1.59
Test::SubCalls1.10
Test::TCP2.19
Test::TCP::CheckPortunknown
Test::TempDir::Tiny0.018
Test::Tester1.302138
Test::Tester::Capture1.302138
Test::Tester::CaptureRunner1.302138
Test::Tester::Delegate1.302138
Test::Time0.06
Test::Toolbox0.4
Test::Trapunknown
Test::Trap::Builderunknown
Test::Trap::Builder::PerlIOunknown
Test::Trap::Builder::SystemSafeunknown
Test::Trap::Builder::TempFileunknown
Test::TypeTiny1.002002
Test::Unit::Lite0.1202
Test::use::ok1.302138
Test::utf81.01
Test::Warn0.36
Test::Warnings0.026
Test::Without::Module0.20
Test::WWW::Mechanize1.50
Test::WWW::Mechanize::Catalyst0.60
Test::WWW::Mechanize::PSGI0.38
Test::WWW::Selenium1.36
Test::YAML1.07
Text::Aligner0.13
Text::Autoformat1.74
Text::Autoformat::Hang1.74
Text::Autoformat::NullHang1.74
Text::CSV1.95
Text::CSV_PP1.95
Text::CSV_XS1.36
Text::Diff1.45
Text::Diff::Config1.44
Text::Diff::Table1.44
Text::Format0.61
Text::German0.06
Text::German::Adjektivunknown
Text::German::Ausnahmeunknown
Text::German::Cacheunknown
Text::German::Endungunknown
Text::German::Regelunknown
Text::German::Utilunknown
Text::German::Verbunknown
Text::German::Vorsilbeunknown
Text::Glob0.11
Text::LineFold2016.00702
Text::Reform1.20
Text::SimpleTable2.05
Text::Table1.133
Text::Template1.53
Text::Template::Preprocess1.53
Text::Unidecode1.30
Text::VisualWidth::PP0.05
threads::shared::array0.36
threads::shared::handle0.36
threads::shared::hash0.36
threads::shared::scalar0.36
Throwable0.200013
Throwable::Error0.200013
Tie::Handle::Offset0.004
Tie::Handle::SkipHeader0.004
Tie::Hash::MultiValue1.05
Tie::IxHash1.23
Tie::ToObject0.03
Tie::Watch1.302
Time::CTime2011.0505
Time::DaysInMonth99.1117
Time::Duration1.20
Time::Duration::Parse0.14
Time::HiRes1.9758
Time::JulianDay2011.0505
Time::ParseDate2015.103
Time::Piece1.3204
Time::Seconds1.3204
Time::Timezone2015.0925
Time::Tiny1.08
Time::Zone2.24
Tk804.034
Tk::Adjuster4.008
Tk::After4.008
Tk::Animation4.008
Tk::Balloon4.012
Tk::Bitmap4.004
Tk::BrowseEntry4.015
Tk::Button4.010
Tk::Canvas4.013
Tk::Checkbutton4.006
Tk::Clipboard4.009
Tk::CmdLine4.007
Tk::ColorDialog4.014
Tk::ColorEditor4.014
Tk::ColorSelect4.014
Tk::Compound4.004
Tk::Config804.034
Tk::Configure4.009
Tk::Derived4.011
Tk::Dialog4.005
Tk::DialogBox4.016
Tk::Dirlist4.004
Tk::DirTree4.022
Tk::DragDrop4.015
Tk::DragDrop::Common4.005
Tk::DragDrop::Local4.004
Tk::DragDrop::Rect4.012
Tk::DragDrop::SunConst4.004
Tk::DragDrop::SunDrop4.006
Tk::DragDrop::SunSite4.007
Tk::DragDrop::XDNDDrop4.007
Tk::DragDrop::XDNDSite4.007
Tk::DropSite4.008
Tk::DummyEncode4.007
Tk::English4.006
Tk::Entry4.018
Tk::ErrorDialog4.007
Tk::Event4.035
Tk::Event::IO4.009
Tk::FBox4.018
Tk::FileSelect4.018
Tk::FloatEntry4.004
Tk::Font4.004
Tk::Frame4.010
Tk::HList4.015
Tk::IconList4.007
Tk::Image4.011
Tk::InputO4.004
Tk::install4.004
Tk::IO4.006
Tk::ItemStyle4.004
Tk::JPEG4.003
Tk::Label4.006
Tk::LabeledEntryLabeledRadiobutton4.004
Tk::Labelframe4.003
Tk::LabEntry4.006
Tk::LabFrame4.010
Tk::LabRadiobutton4.004
Tk::Listbox4.015
Tk::MainWindow4.015
Tk::MakeDepend4.015
Tk::Menu4.023
Tk::Menu::Item4.005
Tk::Menubar4.006
Tk::Menubutton4.005
Tk::Message4.006
Tk::MMtry4.009
Tk::MMutil4.026
Tk::MsgBox4.002
Tk::Mwm4.004
Tk::NBFrame4.004
Tk::NoteBook4.009
Tk::Optionmenu4.014
Tk::Pane4.007
Tk::Panedwindow4.004
Tk::Photo4.006
Tk::Pixmap4.004
Tk::PNG4.004
Tk::Pretty4.006
Tk::ProgressBar4.015
Tk::Radiobutton4.006
Tk::Region4.006
Tk::Reindex4.006
Tk::ReindexedROText4.004
Tk::ReindexedText4.004
Tk::ROText4.010
Tk::Scale4.004
Tk::Scrollbar4.010
Tk::Spinbox4.007
Tk::Stats4.004
Tk::Submethods4.005
Tk::Table4.016
Tk::Text4.024
Tk::Text::Tag4.004
Tk::TextEdit4.004
Tk::TextList4.006
Tk::TextUndo4.015
Tk::Tiler4.012
Tk::TixGrid4.010
Tk::TList4.006
Tk::Toplevel4.006
Tk::Trace4.009
Tk::Tree4.72
Tk::Widget4.036
Tk::widgets4.005
Tk::WinPhoto4.005
Tk::Wm4.015
Tk::X4.005
Tk::X11Font4.007
Tk::Xlib4.004
Tk::Xrm4.005
Tree::DAG_Node1.31
Tree::Simple1.33
Tree::Simple::Visitor1.33
Tree::Simple::Visitor::BreadthFirstTraversal0.15
Tree::Simple::Visitor::CreateDirectoryTree0.15
Tree::Simple::Visitor::FindByNodeValue0.15
Tree::Simple::Visitor::FindByPath0.15
Tree::Simple::Visitor::FindByUID0.15
Tree::Simple::Visitor::FromNestedArray0.15
Tree::Simple::Visitor::FromNestedHash0.15
Tree::Simple::Visitor::GetAllDescendents0.15
Tree::Simple::Visitor::LoadClassHierarchy0.15
Tree::Simple::Visitor::LoadDirectoryTree0.15
Tree::Simple::Visitor::PathToRoot0.15
Tree::Simple::Visitor::PostOrderTraversal0.15
Tree::Simple::Visitor::PreOrderTraversal0.15
Tree::Simple::Visitor::Sort0.15
Tree::Simple::Visitor::ToNestedArray0.15
Tree::Simple::Visitor::ToNestedHash0.15
Tree::Simple::Visitor::VariableDepthClone0.15
Tree::Simple::VisitorFactory0.15
Try::Tiny0.30
TryCatch1.003002
Type::Coercion1.002002
Type::Coercion::FromMoose1.002002
Type::Coercion::Union1.002002
Type::Library1.002002
Type::Params1.002002
Type::Parser1.002002
Type::Registry1.002002
Type::Tiny1.002002
Type::Tiny::_HalfOp1.002002
Type::Tiny::Class1.002002
Type::Tiny::Duck1.002002
Type::Tiny::Enum1.002002
Type::Tiny::Intersection1.002002
Type::Tiny::Role1.002002
Type::Tiny::Union1.002002
Type::Utils1.002002
Types::Common::Numeric1.002002
Types::Common::String1.002002
Types::Serialiser1.0
Types::Standard1.002002
Types::Standard::ArrayRef1.002002
Types::Standard::CycleTuple1.002002
Types::Standard::Dict1.002002
Types::Standard::HashRef1.002002
Types::Standard::Map1.002002
Types::Standard::ScalarRef1.002002
Types::Standard::Tuple1.002002
Types::TypeTiny1.002002
Unicode::CharName0.00
Unicode::EastAsianWidth1.33
Unicode::EastAsianWidth::Detect0.03
Unicode::GCString2013.10
Unicode::LineBreak2018.003
Unicode::LineBreakunknown
Unicode::Map0.112
Unicode::Map80.13
Unicode::String2.10
UNIVERSAL::can1.20140328
UNIVERSAL::isa1.20171012
UNIVERSAL::require0.18
URI1.74
URI::_foreign1.74
URI::_generic1.74
URI::_idna1.74
URI::_ldap1.74
URI::_login1.74
URI::_punycode1.74
URI::_query1.74
URI::_segment1.74
URI::_server1.74
URI::_userpass1.74
URI::data1.74
URI::Escape3.31
URI::file4.21
URI::file::Base1.74
URI::file::FAT1.74
URI::file::Mac1.74
URI::file::OS21.74
URI::file::QNX1.74
URI::file::Unix1.74
URI::file::Win321.74
URI::Find20160806
URI::Find::Schemeless20160806
URI::ftp1.74
URI::gopher1.74
URI::Heuristic4.20
URI::http1.74
URI::https1.74
URI::IRI1.74
URI::ldap1.74
URI::ldapi1.74
URI::ldaps1.74
URI::mailto1.74
URI::mms1.74
URI::news1.74
URI::nntp1.74
URI::pop1.74
URI::QueryParam1.74
URI::rlogin1.74
URI::rsync1.74
URI::rtsp1.74
URI::rtspu1.74
URI::sftp1.74
URI::sip1.74
URI::sips1.74
URI::snews1.74
URI::Split1.74
URI::ssh1.74
URI::telnet1.74
URI::tn32701.74
URI::URL5.04
URI::urn1.74
URI::urn::isbn1.74
URI::urn::oid1.74
URI::WithBase2.20
URI::ws0.03
URI::wss0.03
UUID::Tiny1.04
Variable::Magic0.62
Want0.29
WidgetDemo4.012
Win32::ShellQuote0.003001
WWW::Form::UrlEncoded0.24
WWW::Form::UrlEncoded::PPunknown
WWW::Mechanize1.88
WWW::Mechanize::Image1.88
WWW::Mechanize::Link1.88
WWW::Mechanize::TreeBuilder1.20000
WWW::Pastebin::PastebinCom::Create1.003
WWW::RobotRules6.02
WWW::RobotRules::AnyDBM_File6.00
WWW::Selenium1.36
WWW::Selenium::Util1.36
XML::Atom0.42
XML::Atom::Baseunknown
XML::Atom::Categoryunknown
XML::Atom::Clientunknown
XML::Atom::Contentunknown
XML::Atom::Entryunknown
XML::Atom::ErrorHandlerunknown
XML::Atom::Feedunknown
XML::Atom::Linkunknown
XML::Atom::Personunknown
XML::Atom::Serverunknown
XML::Atom::Thingunknown
XML::Atom::Utilunknown
XML::CommonNS0.06
XML::Compile1.60
XML::Compile::Iterator1.60
XML::Compile::Schema1.60
XML::Compile::Schema::BuiltInFacets1.60
XML::Compile::Schema::BuiltInTypes1.60
XML::Compile::Schema::Instance1.60
XML::Compile::Schema::NameSpaces1.60
XML::Compile::Schema::Specs1.60
XML::Compile::Tester0.91
XML::Compile::Translate1.60
XML::Compile::Translate::Reader1.60
XML::Compile::Translate::Template1.60
XML::Compile::Translate::Writer1.60
XML::Compile::Util1.60
XML::DOM1.46
XML::DOM::DOMExceptionunknown
XML::DOM::NamedNodeMapunknown
XML::DOM::NodeListunknown
XML::DOM::PerlSAXunknown
XML::DOM::XPath0.14
XML::ESISParser0.08
XML::Filter::BufferText1.01
XML::Generator::PerlData0.95
XML::Handler::BuildDOMunknown
XML::Handler::CanonXMLWriter0.08
XML::Handler::Sampleunknown
XML::Handler::Subs0.08
XML::Handler::XMLWriter0.08
XML::LibXML2.0132
XML::LibXML::AttributeHash2.0132
XML::LibXML::Boolean2.0132
XML::LibXML::Common2.0132
XML::LibXML::Devel2.0132
XML::LibXML::ErrNo2.0132
XML::LibXML::Error2.0132
XML::LibXML::Literal2.0132
XML::LibXML::NodeList2.0132
XML::LibXML::Number2.0132
XML::LibXML::Reader2.0132
XML::LibXML::SAX2.0132
XML::LibXML::SAX::Builder2.0132
XML::LibXML::SAX::Generator2.0132
XML::LibXML::SAX::Parser2.0132
XML::LibXML::Simple0.99
XML::LibXML::XPathContext2.0132
XML::LibXSLT1.96
XML::Namespace0.02
XML::NamespaceFactory1.02
XML::NamespaceSupport1.12
XML::Parser2.44
XML::Parser::Expat2.44
XML::Parser::Lite0.721
XML::Parser::PerlSAX0.08
XML::Parser::Style::Debugunknown
XML::Parser::Style::Objectsunknown
XML::Parser::Style::Streamunknown
XML::Parser::Style::Subsunknown
XML::Parser::Style::Treeunknown
XML::PatAct::ACTIONunknown
XML::PatAct::Amsterdam0.08
XML::PatAct::MatchName0.08
XML::PatAct::PATTERNunknown
XML::PatAct::ToObjects0.08
XML::Perl2SAX0.08
XML::RegExp0.04
XML::RSS1.60
XML::RSS::Private::Output::Base1.60
XML::RSS::Private::Output::Roles::ImageDims1.60
XML::RSS::Private::Output::Roles::ModulesElems1.60
XML::RSS::Private::Output::V0_91.60
XML::RSS::Private::Output::V0_911.60
XML::RSS::Private::Output::V1_01.60
XML::RSS::Private::Output::V2_01.60
XML::SAX1.00
XML::SAX2Perl0.08
XML::SAX::Base1.09
XML::SAX::DocumentLocatorunknown
XML::SAX::Exception1.09
XML::SAX::Expat0.51
XML::SAX::ParserFactory1.01
XML::SAX::PurePerl1.00
XML::SAX::PurePerl::DebugHandlerunknown
XML::SAX::PurePerl::Exceptionunknown
XML::SAX::PurePerl::Productionsunknown
XML::SAX::PurePerl::Readerunknown
XML::SAX::PurePerl::Reader::Streamunknown
XML::SAX::PurePerl::Reader::Stringunknown
XML::SAX::PurePerl::Reader::URIunknown
XML::SAX::Writer0.57
XML::SAX::Writer::XML0.57
XML::Simple2.25
XML::Twig3.52
XML::Twig::XPath0.02
XML::Writer0.625
XML::XPath1.42
XML::XPath::Boolean1.42
XML::XPath::Builder1.42
XML::XPath::Expr1.42
XML::XPath::Function1.42
XML::XPath::Literal1.42
XML::XPath::LocationPath1.42
XML::XPath::Node1.42
XML::XPath::Node::Attribute1.42
XML::XPath::Node::Comment1.42
XML::XPath::Node::Element1.42
XML::XPath::Node::Namespace1.42
XML::XPath::Node::PI1.42
XML::XPath::Node::Text1.42
XML::XPath::NodeSet1.42
XML::XPath::Number1.42
XML::XPath::Parser1.42
XML::XPath::PerlSAX1.42
XML::XPath::Root1.42
XML::XPath::Step1.42
XML::XPath::Variable1.42
XML::XPath::XMLParser1.42
XML::XPathEngine0.14
XML::XPathEngine::Booleanunknown
XML::XPathEngine::Exprunknown
XML::XPathEngine::Functionunknown
XML::XPathEngine::Literalunknown
XML::XPathEngine::LocationPathunknown
XML::XPathEngine::NodeSetunknown
XML::XPathEngine::Numberunknown
XML::XPathEngine::Rootunknown
XML::XPathEngine::Step1.0
XML::XPathEngine::Variableunknown
XSunknown
YAML1.26
YAML::Any1.26
YAML::Dumperunknown
YAML::Dumper::Baseunknown
YAML::Errorunknown
YAML::LibYAML0.72
YAML::Loaderunknown
YAML::Loader::Baseunknown
YAML::Marshallunknown
YAML::Mounknown
YAML::Nodeunknown
YAML::Tagunknown
YAML::Tiny1.73
YAML::Typesunknown
YAML::XS0.72
YAML::XS::LibYAMLunknown
Include path (INC) directories | Searched | Number of Modules
/sw/comp/perl_modules/5.26.2/rackham/lib/perl5/5.26.2/x86_64-linux-thread-multi | yes | 0
/sw/comp/perl_modules/5.26.2/rackham/lib/perl5/5.26.2 | yes | 0
/sw/comp/perl_modules/5.26.2/rackham/lib/perl5/x86_64-linux-thread-multi | yes | 1055
/sw/comp/perl_modules/5.26.2/rackham/lib/perl5 | yes | 5989
/sw/comp/perl/5.26.2/rackham/lib/5.26.2/x86_64-linux-thread-multi | no | unknown
/sw/comp/perl/5.26.2/rackham/lib/5.26.2 | no | unknown
/sw/comp/perl/5.26.2/rackham/lib | no | unknown
/sw/comp/perl/5.26.2/rackham/lib/site_perl/5.26.2/x86_64-linux-thread-multi | no | unknown
/sw/comp/perl/5.26.2/rackham/lib/site_perl/5.26.2 | no | unknown
+
    +
  • Total modules : 5989
+ + + + + + + + + + \ No newline at end of file diff --git a/software/perl_packages/index.html b/software/perl_packages/index.html new file mode 100644 index 000000000..259b8c603 --- /dev/null +++ b/software/perl_packages/index.html @@ -0,0 +1,3155 @@ + + + + + + + + + + + + + + + + + + + How do I install local Perl packages? - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

How do I install local Perl packages?

+

What is available already in the perl modules?

+
    +
  • A number of packages are available by default with all Perl versions.
  • For Perl version 5.18.4 in particular (available through the software module system as perl/5.18.4), we have installed many more Perl packages. These are available by loading the software module perl_modules/5.18.4 (see the sketch after this list). We have a complete list of the Perl packages available.
  • If you would like to use BioPerl, module avail BioPerl after loading bioinfo-tools will show the versions available. The latest is BioPerl/1.6.924_Perl5.18.4, which is built against Perl 5.18.4 and so also loads the modules perl/5.18.4 and perl_modules/5.18.4.
+
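
For example, to make those extra packages available in your session, load both modules. This is a minimal sketch; List::MoreUtils is just an illustrative package name, so check the package list linked above for what is actually installed:

+
module load perl/5.18.4
+module load perl_modules/5.18.4
+perl -e 'use List::MoreUtils; print "List::MoreUtils is available\n"'
+
+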

Install other packages

+

You could email support at support@uppmax.uu.se and suggest we include the package in perl_modules. If that doesn't work, or you decide to install it for yourself, please keep reading.

+

First you have to decide where you want to put your local Perl packages. Save this location in a temporary environment variable called MY_PERL; make sure to substitute the path with your own:

+
export MY_PERL=/home/johanhe/slask/perl/
+
+

Then we download and install a more lightweight CPAN client called cpanm, which has fewer confusing settings to configure and also makes it easier to install local packages. We'll then also install the module local::lib to a directory of your choice:

+
wget -O- http://cpanmin.us | perl - -l $MY_PERL App::cpanminus local::lib
+
+

Now we should be ready to set up the correct environment variables and load them for this session:

+
echo "eval `perl -I $MY_PERL/lib/perl5 -Mlocal::lib=$MY_PERL`" >> ~/.bash_profile 
+echo "export PATH=$MY_PERL/bin/:$PATH" >> ~/.bash_profile 
+source ~/.bash_profile
+
+

After this is done we can always install local packages easily by using the command:

+

cpanm [name-of-package-to-install]
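+
+

For example (Try::Tiny is just an arbitrary small CPAN package used here for illustration; any package name works):

+
cpanm Try::Tiny
+perl -MTry::Tiny -e 'print "Try::Tiny is installed\n"'
+
+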

+ + + + + + + + + + \ No newline at end of file diff --git a/software/picard/index.html b/software/picard/index.html new file mode 100644 index 000000000..7b6fa08a8 --- /dev/null +++ b/software/picard/index.html @@ -0,0 +1,3244 @@ + + + + + + + + + + + + + + + + + + + Picard - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Picard

+

'Picard is a set of command line tools for manipulating high-throughput sequencing (HTS) data and formats such as SAM/BAM/CRAM and VCF' +(source: the Picard documentation).

+

Usage

+

Load the bioinfo-tools module first:

+
module load bioinfo-tools
+
+

Then search for your favorite Picard version:

+
module spider picard
+
+
+What does this look like? +

Your output will be similar to this:

+
[sven@rackham2 ~]$ module spider picard
+
+----------------------------------------------------------------------------
+  picard:
+----------------------------------------------------------------------------
+     Versions:
+        picard/1.92
+        picard/1.118
+        picard/1.141
+        picard/2.0.1
+        picard/2.10.3
+        picard/2.19.2
+        picard/2.20.4
+        picard/2.23.4
+        picard/2.27.5
+        picard/3.1.1
+
+----------------------------------------------------------------------------
+  For detailed information about a specific "picard" package (including how to l
+oad the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modu
+les.
+  For example:
+
+     $ module spider picard/3.1.1
+----------------------------------------------------------------------------
+
+
+

Then load your favorite version:

+
module load picard/3.1.1
+
+
+What does this look like? +

Your output will be similar to this:

+
[sven@rackham2 ~]$ module load picard/3.1.1
+picard/3.1.1: java -jar $PICARD command ...
+
+
+

Read up on how to use Picard:

+
module help picard/3.1.1
+
+
+What does this look like? +

Your output will be similar to this:

+
[sven@rackham2 ~]$ module help picard/3.1.1
+
+----------------------------------------------------------------------- Module Specific Help for "picard/3.1.1" -----------------------------------------------------------------------
+ picard - use picard/3.1.1
+
+    Version 3.1.1
+
+Usage:
+
+    java -jar $PICARD command ...
+
+or
+
+    java -jar $PICARD_ROOT/picard.jar command ...
+
+where 'command' is the desired Picard command, and ... are the desired further arguments.
+
+
+

Here is an example of using Picard to test if a file is a valid BAM/CRAM/SAM file:

+
java -jar $PICARD ValidateSamFile --INPUT my_file.bam
+
+
+What does this look like? +

First, download an example BAM file from the Picard GitHub repository:

+
[sven@rackham2 ~]$ wget https://github.com/broadinstitute/picard/raw/master/testdata/picard/flow/reads/input/sample_mc.bam
+
+--2024-08-05 09:16:40--  https://github.com/broadinstitute/picard/raw/master/testdata/picard/flow/reads/input/sample_mc.bam
+Resolving github.com (github.com)... 140.82.121.3
+Connecting to github.com (github.com)|140.82.121.3|:443... connected.
+HTTP request sent, awaiting response... 302 Found
+Location: https://raw.githubusercontent.com/broadinstitute/picard/master/testdata/picard/flow/reads/input/sample_mc.bam [following]
+--2024-08-05 09:16:40--  https://raw.githubusercontent.com/broadinstitute/picard/master/testdata/picard/flow/reads/input/sample_mc.bam
+Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.109.133, 185.199.108.133, 185.199.110.133, ...
+Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.109.133|:443... connected.
+HTTP request sent, awaiting response... 200 OK
+Length: 117715 (115K) [application/octet-stream]
+Saving to: ‘sample_mc.bam’
+
+100%[=============================================================================================================================================>] 117,715     --.-K/s   in 0.001s  
+
+2024-08-05 09:16:41 (171 MB/s) - ‘sample_mc.bam’ saved [117715/117715]
+
+

Your output will be similar to this, when using that valid BAM file:

+
[sven@rackham2 ~]$ java -jar $PICARD ValidateSamFile --INPUT sample_mc.bam 
+Aug 05, 2024 9:16:47 AM com.intel.gkl.NativeLibraryLoader load
+INFO: Loading libgkl_compression.so from jar:file:/sw/bioinfo/picard/3.1.1/rackham/picard.jar!/com/intel/gkl/native/libgkl_compression.so
+[Mon Aug 05 09:16:47 CEST 2024] ValidateSamFile --INPUT sample_mc.bam --MODE VERBOSE --MAX_OUTPUT 100 --IGNORE_WARNINGS false --VALIDATE_INDEX true --INDEX_VALIDATION_STRINGENCY EXHAUSTIVE --IS_BISULFITE_SEQUENCED false --MAX_OPEN_TEMP_FILES 8000 --SKIP_MATE_VALIDATION false --VERBOSITY INFO --QUIET false --VALIDATION_STRINGENCY STRICT --COMPRESSION_LEVEL 5 --MAX_RECORDS_IN_RAM 500000 --CREATE_INDEX false --CREATE_MD5_FILE false --help false --version false --showHidden false --USE_JDK_DEFLATER false --USE_JDK_INFLATER false
+[Mon Aug 05 09:16:47 CEST 2024] Executing as sven@rackham2.uppmax.uu.se on Linux 3.10.0-1160.119.1.el7.x86_64 amd64; OpenJDK 64-Bit Server VM 17+35-2724; Deflater: Intel; Inflater: Intel; Provider GCS is available; Picard version: Version:3.1.1
+WARNING 2024-08-05 09:16:47 ValidateSamFile NM validation cannot be performed without the reference. All other validations will still occur.
+No errors found
+[Mon Aug 05 09:16:48 CEST 2024] picard.sam.ValidateSamFile done. Elapsed time: 0.01 minutes.
+Runtime.totalMemory()=2181038080
+[sven@rackham2 ~]$ 
+
+

Your output will be similar to this, when using an invalid file, +such as an R script file:

+
[sven@rackham2 ~]$ java -jar $PICARD ValidateSamFile --INPUT app.R 
+Aug 05, 2024 9:13:20 AM com.intel.gkl.NativeLibraryLoader load
+INFO: Loading libgkl_compression.so from jar:file:/sw/bioinfo/picard/3.1.1/rackham/picard.jar!/com/intel/gkl/native/libgkl_compression.so
+[Mon Aug 05 09:13:20 CEST 2024] ValidateSamFile --INPUT app.R --MODE VERBOSE --MAX_OUTPUT 100 --IGNORE_WARNINGS false --VALIDATE_INDEX true --INDEX_VALIDATION_STRINGENCY EXHAUSTIVE --IS_BISULFITE_SEQUENCED false --MAX_OPEN_TEMP_FILES 8000 --SKIP_MATE_VALIDATION false --VERBOSITY INFO --QUIET false --VALIDATION_STRINGENCY STRICT --COMPRESSION_LEVEL 5 --MAX_RECORDS_IN_RAM 500000 --CREATE_INDEX false --CREATE_MD5_FILE false --help false --version false --showHidden false --USE_JDK_DEFLATER false --USE_JDK_INFLATER false
+[Mon Aug 05 09:13:21 CEST 2024] Executing as sven@rackham2.uppmax.uu.se on Linux 3.10.0-1160.119.1.el7.x86_64 amd64; OpenJDK 64-Bit Server VM 17+35-2724; Deflater: Intel; Inflater: Intel; Provider GCS is available; Picard version: Version:3.1.1
+WARNING 2024-08-05 09:13:21 ValidateSamFile NM validation cannot be performed without the reference. All other validations will still occur.
+ERROR::MISSING_READ_GROUP:Read groups is empty
+SAMFormatException on record 01
+ERROR 2024-08-05 09:13:21 ValidateSamFile SAMFormatException on record 01
+[Mon Aug 05 09:13:21 CEST 2024] picard.sam.ValidateSamFile done. Elapsed time: 0.01 minutes.
+Runtime.totalMemory()=2181038080
+To get help, see http://broadinstitute.github.io/picard/index.html#GettingHelp
+
+
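
As a further example, Picard's SortSam command coordinate-sorts a BAM file. This is a minimal sketch that reuses the sample_mc.bam file downloaded above; the output file name is arbitrary:

+
java -jar $PICARD SortSam --INPUT sample_mc.bam --OUTPUT sample_mc.sorted.bam --SORT_ORDER coordinate
+
+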
+ + + + + + + + + + \ No newline at end of file diff --git a/software/profilers/index.html b/software/profilers/index.html new file mode 100644 index 000000000..fdec33ad1 --- /dev/null +++ b/software/profilers/index.html @@ -0,0 +1,3162 @@ + + + + + + + + + + + + + + + + + + + + + + + Profilers - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Profilers

+

Several profiling tools are available at UPPMAX.

Software      | Compiler(s)   | Description
Intel VTune   | Intel         | Broad set of tools with a focus on performance improvement
Intel Advisor | Intel         | Broad set of tools with a focus on performance analysis
gprof         | GCC           | run-time profiler
valgrind      | GCC and Intel | Broad set of tools
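
For example, here is a minimal sketch of the classic gprof workflow (assuming a C source file myprog.c; the file names are just placeholders):

+
gcc -pg -O2 -o myprog myprog.c   # -pg adds profiling instrumentation
+./myprog                         # running the program writes gmon.out
+gprof ./myprog gmon.out > profile.txt
+
+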
+ + + + + + + + + + \ No newline at end of file diff --git a/software/projplot/index.html b/software/projplot/index.html new file mode 100644 index 000000000..08c4be7ec --- /dev/null +++ b/software/projplot/index.html @@ -0,0 +1,3399 @@ + + + + + + + + + + + + + + + + + + + + + + + projplot - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

projplot

+

Projplot plot

+

projplot is an UPPMAX tool to plot your core hour usage.

+

Minimal use

+

projplot needs only the project code:

+
projplot -A [project_code]
+
+

For example:

+
projplot -A uppmax2020-2-2
+
+

Output will look similar to this:

+

A projplot plot

+
+

Example projplot output. The horizontal axis +shows the days before today, the vertical axis shows +the cores used on that day (hence, the amount of core hours +is the area under the curve). +For this example project, +apparently, the maximum number of cores per day is 800.

+
+

This graph shows you the project's core usage during the last 30 days. +The heights of the peaks in the plot show you +how many cores were used simultaneously, +and the widths show you for how long they were used.

+

If we look at the big peak to the left in the diagram, +we can see that 15 cores were used for around 24 hours, +and somewhere in the middle of that period, +another 8 cores were used for a shorter period of time.

+

Since the plots are made using ordinary text, +there will sometimes be rounding errors +because of the low resolution of the terminal window, +which is usually around 80x30 characters. +The plot will adapt to your terminal window, +so increase the size of your window to increase the resolution of the plot +(the data being plotted has a resolution down to single seconds).

+

As time progresses, the peaks in the graph will move to the left in the diagram. +In the standard plot of the last 30 days, +that means that when a peak exits the plot to the left, +you get those core hours back to the project.

+

If you are over quota

+

If we look at a project that has used more core hours than its project allocation, +the image will look like this:

+

A projplot that is over quota

+

There is a message about the core hour limit being reached at the top of the plot. +If you look in the diagram at around 10 days ago, +you will see the point where the core hour limit is reached +(the bar of >s). +This point is calculated by summing up all core hour usage +to the right of the bar. +What this means in reality is that if this project were to stop analyzing right now, +they would have to wait until the bar of >s has exited the graph to the left +(i.e. ~20 days) before they are below their core hour limit again. +Most of the time, projects do not completely stop analyzing, +so for each core hour they use, the > bar will move further to the right.

+

Other options

+

projplot has more options, which are shown by using --help:

+
projplot --help
+
+

Below, these options are discussed in detail.

+

Help

+

Use --help (or -h) to get a short description of the options and some examples:

+
projplot --help
+
+
+What does that look like? +
Usage: projplot -A <proj-id> [options]
+
+More details: https://uppmax.uu.se/support/user-guides/plotting-your-core-hour-usage
+
+Example runs:
+
+# Plot the last 30 days of project <proj>
+projplot -A <proj>
+
+# Plot the last 30 days of project <proj> on cluster <cluster>
+projplot -A <proj> -c <cluster>
+
+# Plot the last <n> days of project <proj>
+projplot -A <proj> -d <n>
+
+# Plot the usage for project <proj> since <date>
+projplot -A <proj> -s <date>
+
+# Plot the usage for project <proj> between <date_1> and <date_2>
+projplot -A <proj> -s <date_1> -e <date_2>
+
+# Plot the usage for project <proj> between <date_1> and <date_2>, on cluster <cluster>
+projplot -A <proj> -s <date_1> -e <date_2> -c <cluster>
+
+# Plot the usage for project <proj> between date <date_1> and <days> days later
+projplot -A <proj> -s <date_1> -d <days>
+
+# Plot the usage for project <proj> between date <date_1> and <days> days earlier
+projplot -A <proj> -e <date_1> -d <days>
+
+# Plot the last 30 days of project <proj>, but don't check the queue for running jobs
+projplot -A <proj> -R
+
+
+Options:
+  -h, --help            show this help message and exit
+  -A ACCOUNT, --account=ACCOUNT
+                        Your UPPMAX project ID
+  -c CLUSTER, --cluster=CLUSTER
+                        The cluster you want to plot (default: current
+                        cluster)
+  -d DAYS, --days=DAYS  The number of days you want to plot (default: none)
+  -s START, --start=START
+                        The starting date you want to plot (format: YYYY-MM-
+                        DD)
+  -e END, --end=END     The ending date you want to plot (format: YYYY-MM-DD)
+  -R, --no-running-jobs
+                        Use to skip including running jobs in the plot
+                        (faster). Useful if you are not running any jobs and
+                        want to save time.
+
+
+

Number of days

+

Use --days (or -d) to plot a custom number of days, +instead of the default of 30 days:

+
projplot -A [project_code] --days [number_of_days]
+
+

For example, this will plot the last 45 days:

+
projplot -A uppmax2020-2-2 --days 45
+
+

Starting date

+

Use --start (or -s) to specify a custom starting date, +from when the time in your plot will start:

+
projplot -A [project_code] --start [starting_date_in_yyyy-mm-dd_format]
+
+

For example:

+
projplot -A uppmax2020-2-2 --start 2023-05-03
+
+

will give you a plot starting on the date 2023-05-03 +and the default number of days after that date. +The command below does exactly the same, yet makes the default +number of days explicit:

+
projplot -A uppmax2020-2-2 --start 2023-05-03 --days 30
+
+

Ending date

+

Use --end (or -e) to specify a custom ending date, +from when the time in your plot will end:

+
projplot -A [project_code] --end [ending_date_in_yyyy-mm-dd_format]
+
+

For example:

+
projplot -A uppmax2020-2-2 --end 2023-05-03
+
+

will give you a plot ending on the date 2023-05-03 +and the default number of days before that date. +The command below does exactly the same, yet makes the default +number of days explicit:

+
projplot -A uppmax2020-2-2 --end 2023-05-03 --days 30
+
+

Start and end date combined

+

Use --start and --end combined to specify a custom range +of dates for your plot:

+
projplot -A [project_code] --start [starting_date_in_yyyy-mm-dd_format] --end [ending_date_in_yyyy-mm-dd_format]
+
+

For example:

+
projplot -A uppmax2020-2-2 --start 2022-05-03 --end 2023-05-03
+
+

Cluster

+

Use --cluster (or -c) to determine which UPPMAX cluster to plot. +By default, the current cluster is used.

+

Since the different clusters at UPPMAX have separate core hour quotas, +it makes sense to be able to plot them separately.

+
projplot -A [project_code] -c [cluster_name]
+
+

For example:

+
projplot -A uppmax2020-2-2 -c snowy
+
+

Valid cluster names are bianca, rackham and snowy.

+
+How do I get valid cluster names? +

Use projplot with a nonsense clustername:

+
projplot -A uppmax2020-2-2 --cluster nonsensename
+
+

The error message will display valid cluster names.

+
+

This option can be combined with all the other options.

+
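
For example, here is a small sketch that plots the same project on two clusters in one go (reusing the example project code and cluster names from above):

+
for cluster in rackham snowy; do
+    projplot -A uppmax2020-2-2 --cluster "$cluster"
+done
+
+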

Exclude running jobs

+

Use --no-running-jobs (or -R) to skip checking the queue for running jobs.

+

If you don't have any running jobs, +asking the queue system to list jobs is just a waste of time +(anywhere from 1 to 15 seconds). +By giving --no-running-jobs when running projplot, +it skips checking the queue; if you do have jobs running, +they will not be visible in the plot or in the sum of core hours used.

+
projplot -A [project_code] --no-running-jobs
+
+

For example:

+
projplot -A uppmax2020-2-2 --no-running-jobs
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python/index.html b/software/python/index.html new file mode 100644 index 000000000..ea13b8383 --- /dev/null +++ b/software/python/index.html @@ -0,0 +1,3834 @@ + + + + + + + + + + + + + + + + + + + + + + + Python - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Python user guide

+

The Python logo

+

Welcome to the UPPMAX Python user guide.

+

We describe what Python is +and that there are multiple Python versions.

+

Then, we show how to load Python +and to load Python packages +after which you can run Python.

+

Finally, you can find UPPMAX Python-related courses +and these more advanced topics:

+ +

What is Python?

+

Python is a high-level, general-purpose programming language. +Its design philosophy emphasizes code readability +with the use of significant indentation [Kuhlman, 2009].

+

Python versions

+

Python (or to be precise: the Python interpreter) has different versions. +The current major version of Python is Python 3. +Python 3 is not backwards compatible with Python 2. +This means that you need to use the correct Python version +to run a Python script.

+
+Could you give me an example of a difference between Python 2 and 3? +

One example is how Python 2 and Python 3 divide two integers. +Here is an example that will work on all UPPMAX clusters.

+

Load Python 2.7.15:

+
module load python/2.7.15
+
+

Then

+
python -c "print(1/2)"
+
+

will print 0, as this is an integer division: two fits zero times in one.

+

Load Python 3.11.4:

+
module load python/3.11.4
+
+

Then

+
python -c "print(1/2)"
+
+

will print 0.5, as this is turned into a floating point division, +equivalent to 1.0 / 2.0.

+
+
+Which version of Python is python? +

To determine which version python is, in a terminal, type:

+
python --version
+
+

to see which Python version you are using now.

+
+
+Which version of Python is python3? +

To determine which version python3 is, in a terminal, type:

+
python3 --version
+
+

to see which Python version you are using now.

+
+

Loading Python

+
+Prefer seeing a video? +

A video that shows how to load the Python +module +can be found here.

+
+

The different versions of Python are available via +the module system on all UPPMAX clusters. +Loading a Python module also makes some Python packages available.

+
+Forgot what the module system is? +

See the UPPMAX pages on the module system here.

+
+
+UPPMAX modules or Python modules? +

On this page, we will use the word 'modules' for UPPMAX modules +and 'packages' for Python modules, to be clear about what is meant. +The word 'package' is used in multiple other languages, such as R, +with a meaning similar to that of a Python module.

+
+

To find out which Python modules there are, use module spider python.

+
+What is the output of that command? +

The output of module spider python on the day of writing, is:

+
[user@rackham1 ~]$ module spider python
+
+---------------------------------------------------------------------------------------
+  python:
+---------------------------------------------------------------------------------------
+     Versions:
+        python/2.7.6
+        python/2.7.9
+        python/2.7.11
+        python/2.7.15
+        python/3.3
+        python/3.3.1
+        python/3.4.3
+        python/3.5.0
+        python/3.6.0
+        python/3.6.8
+        python/3.7.2
+        python/3.8.7
+        python/3.9.5
+        python/3.10.8
+        python/3.11.4
+     Other possible modules matches:
+        Biopython  Boost.Python  GitPython  IPython  Python  biopython  flatbuffers-python
+ ...
+
+---------------------------------------------------------------------------------------
+  To find other possible module matches execute:
+
+      $ module -r spider '.*python.*'
+
+---------------------------------------------------------------------------------------
+  For detailed information about a specific "python" package (including how to load the mod
+ules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modules.
+  For example:
+
+     $ module spider python/3.11.4
+---------------------------------------------------------------------------------------
+
+
+

To load a specific version of Python into your environment, +type module load python/[version], +where [version] is a Python version, +for example, module load python/3.11.4

+
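
For example, to load a module and check that it took effect (the second command should then print Python 3.11.4):

+
module load python/3.11.4
+python --version
+
+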
+Do I really need to load a Python module? +

It is recommended to load a Python module, +but in some case you will not get into trouble.

+

When you do not load a module, the system-installed Python versions are used. +These are python version 2.7.5, and python3 version 3.6.8.

+

If using those older versions gives you no trouble, all is well, +for example, when running basic Python scripts that have no package imports.

+

However, when any problem occurs, load one of the newer modules.

+
+
+Why are there both python/3.X.Y and python3/3.X.Y modules? +

Sometimes existing software might use python2 +and there’s nothing you can do about that.

+

In pipelines and other toolchains the different tools may together +require both python2 and python3.

+
+
+How to deal with tools that require both python2 and python3? +

You can run two python modules at the same time if +one of the modules is python/2.X.Y and the other module is python3/3.X.Y +(i.e. not python/3.X.Y).

+
+

Loading Python package modules

+
+

Terminology

+

A Python package consists of one or more Python modules. +In this document, we avoid using this term, +to avoid confusion with the UPPMAX modules.

+
+

For more complex Python packages, +there exist UPPMAX modules to load these:

+
    +
  • python_GIS_packages: for geographic information system packages
  • python_ML_packages: for machine learning Python packages (see the example below)
+
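
For example, to load the machine-learning package module that is mentioned further below (the exact versions available may differ; the admonition below shows how to find them):

+
module load python_ML_packages/3.11.8-cpu
+
+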
+How could I find these modules myself? +

Use:

+
module spider packages
+
+
+

Loading Python packages

+
+

Terminology

+

A Python package consists of one or more Python modules. +In this document, we avoid using this term, +to avoid confusion with the UPPMAX modules.

+
+

Many scientific tools are distributed as Python packages, +which allows any user to run complex tools from a terminal or script. +For example, the following Python code imports the functionality +of the pandas library:

+
import pandas
+
+

Some packages/tools are preinstalled on all UPPMAX clusters. +To load such a package:

+
    +
  • determine if it comes with your Python version
  • determine if it comes as a module
+

Determine if a Python package comes with your Python module

+

To determine if a Python package comes with your Python module, +there are multiple ways:

+
    +
  • Using pip list
  • Using the module help
  • Importing the package
+

Using pip list

+

Using pip list is one of the ways to determine +if a Python package comes with your Python module.

+

On a terminal, type:

+
pip list
+
+

This shows a list of Python packages that are installed.

+
+What does the output of pip list look like? +

Here is an example:

+
Package                   Version
+------------------------- ---------------
+anndata                   0.10.5.post1
+anyio                     4.2.0
+appdirs                   1.4.4
+argon2-cffi               23.1.0
+argon2-cffi-bindings      21.2.0
+[more Python packages]
+Werkzeug                  3.0.1
+wheel                     0.42.0
+widgetsnbextension        4.0.9
+zipp                      3.17.0
+zope.interface            6.1
+
+
+

Using the module help

+

To determine if a Python package comes with your Python module +using the module help, in a terminal, type:

+
module help python/[module_version]
+
+

where [module_version] is a version of a Python module, +for example:

+
module help python/3.11.4
+
+
+What is the output of module help python/3.11.4? +

Here is part of the output of module help python/3.11.4:

+
------------------------ Module Specific Help for "python/3.11.4" -------------------------
+    Python - use Python
+
+    Version 3.11.4
+
+
+This module provides the executable names 'python' and 'python3'.
+
+Several additional python packages are also installed in this module. The complete list of
+packages in this module, produced using 'pip list', is:
+
+Package                   Version
+------------------------- -----------
+anndata                   0.9.2
+anyio                     3.7.1
+argon2-cffi               21.3.0
+...
+widgetsnbextension        4.0.8
+zipp                      3.16.2
+zope.interface            6.0
+
+
+

Importing the package

+

Importing a Python package is another way to determine if it +comes with your Python module. +From the terminal, do:

+
python -c "import [your_package]"
+
+
+What does that -c do? +

python -c will run the text after it as Python code. +In this way, you can directly run code, i.e. +you do not need to create a file to run.

+
+

where [your_package] is the name of a Python package, +for example:

+
python -c "import pandas"
+
+
+What is the output if the Python package is found? +

If the Python package is found, there is no output.

+
+
+What is the output if the Python package is not found? +

Here, an absent package is loaded, with the nonsense name absentpackage:

+
python -c "import absentpackage"
+
+

This results in the following error:

+
Traceback (most recent call last):
+  File "<string>", line 1, in <module>
+ModuleNotFoundError: No module named 'absentpackage'
+
+
+

Determine if a Python package comes with a module

+

If the Python package is not pre-installed with your version of Python, +use the UPPMAX module system +to search for it.

+

Not all packages are easy to find, +as some are part of super-packages, +for example the TensorFlow Python libraries, +which are part of the python_ML_packages/[version]-{cpu,gpu}, +for example python_ML_packages/3.11.8-cpu.

+
+Want to see a list of Python packages in python_ML_packages/3.11.8-cpu that are not in python/3.11.8? +

Here you go:

+
    +
  • absl-py
  • array-record
  • astunparse
  • cachetools
  • cons
  • dill
  • dm-tree
  • ducc0
  • etils
  • etuples
  • flatbuffers
  • gast
  • google-auth
  • google-auth-oauthlib
  • google-pasta
  • googleapis-common-protos
  • grpcio
  • imbalanced-learn
  • importlib_resources
  • keras
  • libclang
  • llvmlite
  • logical-unification
  • miniKanren
  • ml-dtypes
  • multipledispatch
  • nlp
  • numba
  • oauthlib
  • opt-einsum
  • patsy
  • promise
  • protobuf
  • pyasn1
  • pyasn1-modules
  • pytensor
  • requests-oauthlib
  • rsa
  • scikit-learn
  • seaborn
  • statsmodels
  • tensorboard
  • tensorboard-data-server
  • tensorflow-cpu
  • tensorflow-datasets
  • tensorflow-estimator
  • tensorflow-io-gcs-filesyst
  • tensorflow-metadata
  • tensorflow-probability
  • termcolor
  • threadpoolctl
  • toml
  • torch
  • torchaudio
  • torchvision
  • wrapt
  • xxhash
+
+

It may not always be easy to find your Python package within the many modules. +Do not hesitate to contact support +so that you can spend time on your research +and we figure this out :-)

+

Stand-alone tools

+

Some Python packages work as stand-alone tools, for instance in +bioinformatics. The tool may already be installed as a module. Check if it is +there by using the module system spider function:

+
module spider [tool_name]
+
+

where [tool_name] is (part of) the name of the tool. module spider +is case-insensitive, hence YourTool and yourtool give similar results.

+
+What are UPPMAX modules? +

See the page about the UPPMAX module system here

+
+

Running Python

+

You can run Python in multiple ways:

+
    +
  • use Python to run a Python script
  • use Python in an interactive session
+

There are more ways to program in Python; +these are discussed at the UPPMAX page on +Python programming here

+

Use Python to run a Python script

+

You can run a Python script in the shell by:

+
python example_script.py
+
+

or, if you loaded a python3 module:

+
python3 example_script.py
+
+

Use Python in an interactive session

+

You start a Python session by typing:

+
python
+
+

or

+
python3
+
+

The Python prompt looks like this:

+
>>>
+
+

Exit with <Ctrl-D>, quit() or exit().

+
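
A minimal interactive session could then look like this (user input follows the >>> prompt; the computation is just an illustration):

+
$ python
+>>> 1 + 1
+2
+>>> exit()
+
+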

Programming in Python

+

There are more ways to program in Python; +these are discussed at the UPPMAX page on +Python programming here

+ +

See the UPPMAX courses and workshops +to find UPPMAX courses related to Python.

+

Installing Python packages

+

How to install Python packages +is described here.

+

Virtual environments in Python

+

How to use virtual environments in Python +is described here.

+

How to run parallel jobs in Python

+

How to run parallel jobs in Python +is described here.

+

References

+
    +
  • [Kuhlman, 2009] Kuhlman, Dave. A python book: Beginning python, advanced python, and python exercises. Lutz: Dave Kuhlman, 2009.
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_install_packages/index.html b/software/python_install_packages/index.html new file mode 100644 index 000000000..efb648d14 --- /dev/null +++ b/software/python_install_packages/index.html @@ -0,0 +1,3298 @@ + + + + + + + + + + + + + + + + + + + Installing Python packages - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Installing Python packages

+

This page describes how to install Python packages.

+

There are many ways to install a Python package:

+
    +
  • Using setup.py
  • Using a Python package installer
+

You may want to check if a package is already installed first :-).

+

The Python package installers are compared +after which each is discussed:

+ +

Check if a package is already installed

+

There are multiple ways to check if a Python package is installed:

+

1. pip list

+

In the terminal, type:

+
pip list
+
+

You'll see a list of all installed packages.

+
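
For illustration only (a sketch, not specific to UPPMAX): the same check can be done programmatically with the standard-library importlib.metadata module.

+
import importlib.metadata
+
+# Programmatic equivalent of 'pip list': print the name and version
+# of every distribution visible to this Python
+for dist in importlib.metadata.distributions():
+    print(dist.metadata['Name'], dist.version)
+
+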

2. import

+

Start Python. Then, within the Python interpreter, type:

+
import [package]
+
+

where [package] is the name of the Python package, for example import mhcnuggets.

+

Does it work? Then it is there!

+
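
Many, but not all, packages also expose a version attribute, which gives a quick check that the package is both present and the version you expect. A minimal sketch, with numpy as an example package:

+
import numpy  # replace numpy with the package you want to check
+print(numpy.__version__)
+
+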

Comparison between Conda and PyPI

+
    +
  • +

    PyPI (pip) is traditionally for Python-only packages but it is no problem to +also distribute packages written in other languages as long as they provide a +Python interface.

    +
  • +
  • +

    Conda (conda) is more general and while it contains many Python packages and +packages with a Python interface, it is often used to also distribute packages +which do not contain any Python (e.g. C or C++ packages).

    +
  • +
+ + + + + + + + + + + + + + + + + + + + +
Parametercondapip
Installs Python packagesYesYes
Installs non-Python softwareYesNo
+

Many libraries and tools are distributed in both ecosystems.

+

pip

+

pip is a popular Python package installer.

+

To install a Python package using pip, in a terminal, do:

+
pip install --user [package name]
+
+

where [package name] is the name of a Python package, +for example pip install --user mhcnuggets.

+
+Can I also use pip3? +

Yes, you can. The command then becomes:

+
pip3 install --user [package name]
+
+

For example pip3 install --user mhcnuggets.

+

Most that applies to pip applies to pip3.

+
+

Due to using --user, the package ends up in a subfolder of the user's home folder, which is ~/.local/lib/python[version]/site-packages/, where [version] is the Python version with only the major and minor numbers; for Python version 3.11.8 the folder will be python3.11 (i.e. the patch number, 8, is not included).

+
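
A minimal sketch to print this folder for the Python you have loaded (the exact path depends on your Python version and home folder):

+
import site
+import sys
+
+print(sys.version_info[:2])        # e.g. (3, 11) for Python 3.11.8
+print(site.getusersitepackages())  # where 'pip install --user' puts packages
+
+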

If you would like to have your packages installed in another folder, do:

+
pip install --prefix=[root_folder] [package name]
+
+

where [root_folder] is the root folder of the package installation, for example --prefix=~/.local. Using this root folder, this option is the same as using --user, as described above.

+

When using a custom root folder, Python cannot find it without help. +Setting the environment variable PYTHONPATH to the correct folder +allows Python to find packages in a custom folder.

+
export PYTHONPATH=[root_folder]/lib/python[version]/site-packages/:$PYTHONPATH
+
+

for example, when [root_folder] is ~/my_python_packages and for using Python +3.11.8, this will be:

+
export PYTHONPATH=~/my_python_packages/lib/python3.11/site-packages/:$PYTHONPATH
+
+

Consider adding this line to your .bashrc file, so it is loaded every time you log in.

+
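
To verify that Python picks up the custom folder, a minimal sketch (assuming the example folder name my_python_packages used above):

+
import sys
+
+# After 'export PYTHONPATH=...', the folder should appear in sys.path
+print([p for p in sys.path if 'my_python_packages' in p])
+
+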

conda

+

See our Conda user Guide

+

Using setup.py

+

Some Python packages are only available as downloads +and need to be installed using a Python script, +commonly called setup.py.

+

If that is the case for the package you need, this is how you do it:

+
    +
  • +

    Pick a location for your installation + (change below to fit - I am installing under a project storage)

    +
      +
    • mkdir /proj/<project>/<mystorage>/mypythonpackages
    • +
    • cd /proj/<project>/<mystorage>/mypythonpackages
    • +
    +
  • +
  • +

Load Python and any site-installed prerequisites (SciPy-bundle, matplotlib, etc.)

    +
  • +
  • Install any remaining prerequisites. Remember to activate your Virtualenv if installing with pip!
  • +
  • Download Python package, place it in your chosen installation dir, then untar/unzip it
  • +
  • +

    cd into the source directory of the Python package

    +
      +
    • Run python setup.py build
    • +
    • Then install with: python setup.py install --prefix=<path to install dir>
    • +
    +
  • +
  • +

    Add the path to $HOME/.bash_profile (note that it will differ by Python version):

    +
      +
    • export PYTHONPATH=$PYTHONPATH:<path to your install directory>/lib/python3.11/site-packages
    • +
    +
  • +
+

You can then use it as normal inside Python (remember to load dependent modules, as well as to activate the virtual environment if it depends on some packages you installed with pip): import <python-module>

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_parallel_jobs/index.html b/software/python_parallel_jobs/index.html new file mode 100644 index 000000000..e3b0ee016 --- /dev/null +++ b/software/python_parallel_jobs/index.html @@ -0,0 +1,3399 @@ + + + + + + + + + + + + + + + + + + + How to run parallel jobs in Python - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

How to run parallel jobs in Python

+

This page describes how to run parallel jobs in Python. +For the general pages on Python, go here.

+

Material here is taken partly from the parallel part of the online course +Python for Scientific +Computing

+

Parallel computing is when many different tasks are carried out simultaneously. +There are three main models:

+
    +
  • +

    Embarrassingly parallel: the code does not need to synchronize/communicate +with other instances, and you can run multiple instances of the code +separately, and combine the results later. If you can do this, great! (array +jobs, task queues)

    +
  • +
  • +

    Shared memory parallelism: Parallel threads need to communicate and do so via +the same memory (variables, state, etc). (OpenMP)

    +
  • +
  • +

    Message passing: Different processes manage their own memory segments. They +share data by communicating (passing messages) as needed. (Message Passing +Interface (MPI)).

    +
  • +
+

There are several packages available for Python that let you run parallel jobs. Some of them are only able to run on one node, while others try to leverage several machines.

+

Threading

+

Threading divides up your work among a number of cores within a node. The threads share memory.

+
    +
  • Multi-threading documentation
  • +
  • Examples
  • +
+

The designers of the Python language made the choice that only one thread in a process can run actual Python code, by using the so-called global interpreter lock (GIL). This means that approaches that work in other languages (C, C++, Fortran) may not work in Python without some care. At first glance, this is bad for parallelism. But it's not all bad:

+

External libraries (NumPy, SciPy, Pandas, etc), written in C or other languages, can release the lock and run multi-threaded. Also, most input/output releases the GIL, and input/output is slow.

+
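
As a minimal sketch of the input/output case (the URLs are placeholders and network access is needed), threads can overlap the time spent waiting, because the GIL is released during input/output:

+
from concurrent.futures import ThreadPoolExecutor
+import urllib.request
+
+urls = ['https://www.python.org', 'https://www.uppmax.uu.se']
+
+def fetch(url):
+    # Network input/output releases the GIL, so threads overlap the waiting
+    with urllib.request.urlopen(url) as response:
+        return url, len(response.read())
+
+with ThreadPoolExecutor(max_workers=2) as executor:
+    for url, n_bytes in executor.map(fetch, urls):
+        print(url, n_bytes)
+
+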

If speed is important enough that you need parallelism, you usually wouldn't use pure Python anyway.

+

More on the global interpreter lock

+

The threading Python module is very low level: you shouldn't use it unless you really know what you are doing.

+

We recommend you find a UNIX threading tutorial first before embarking on using the threading module.

+

Distributed computing

+

As opposed to threading, Python has a reasonable way of doing something similar, using multiple processes.

+

Distributed processing uses individual processes with individual memory that communicate with each other. In this case, data movement and communication are explicit. Python supports various forms of distributed computing.

+
    +
  • A native master-worker system based on remote procedure calls: multiprocessing.py
  • +
  • MPI through mpi4py : a Python wrapper for the MPI protocol, see further down
  • +
+

If choosing between multiprocessing and MPI, multiprocessing is easier to program, whereas MPI may be more suitable for multi-node applications.

+

Multiprocessing/distributed

+

The interface is a lot like threading, but in the background it creates new processes to get around the global interpreter lock.

+

There are low-level functions which have a lot of the same risks and difficulties as when using threading.

+

To show an example, the split-apply-combine or map-reduce paradigm is quite useful for many scientific workflows. Consider you have this:

+
def square(x):
+    return x*x
+
+

You can apply the function to every element in a list using the map() function:

+
>>>list(map(square, [1, 2, 3, 4, 5, 6]))
+[1, 4, 9, 16, 25, 36]
+
+

The multiprocessing.pool.Pool class provides an equivalent but parallelized (via multiprocessing) way of doing this. The pool class, by default, creates one new process per CPU and does parallel calculations on the list:

+
>>>from multiprocessing import Pool
+>>>with Pool() as pool:
+...    pool.map(square, [1, 2, 3, 4, 5, 6])
+[1, 4, 9, 16, 25, 36]
+
+

As you can see, you can run distributed computing directly from the Python shell.

+

Another example, distributed.py:

+
import random
+
+def sample(n):
+    """Make n trials of points in the square.
+    Return (n, number_in_circle)
+    This is our basic function.
+    By design, it returns everything it needs to compute
+    the final answer: both n (even though it is an input
+    argument) and n_inside_circle.
+    To compute our final answer, all we have to do is
+    sum up the n:s and the n_inside_circle:s and do our
+    computation"""
+    n_inside_circle = 0
+    for i in range(n):
+        x = random.random()
+        y = random.random()
+        if x**2 + y**2 < 1.0:
+            n_inside_circle += 1
+    return n, n_inside_circle
+
+import multiprocessing.pool
+
+# The default pool makes one worker process per CPU
+pool = multiprocessing.pool.Pool()
+# Apply sample to ten work items of 10**5 trials each, in parallel
+results = pool.map(sample, [10**5] * 10)
+pool.close()
+n_sum = sum(x[0] for x in results)
+n_inside_circle_sum = sum(x[1] for x in results)
+pi = 4.0 * (n_inside_circle_sum / n_sum)
+print(pi)
+
+

Batch example

+

If you need to refresh your knowledge about the scheduling system, please check the Slurm user guide.

+

Batch script job_distributed.slurm:

+
#!/bin/bash
+#SBATCH -A <proj>
+#SBATCH -p devel
+#SBATCH --job-name=distr_py      # create a short name for your job
+#SBATCH --nodes=1                # node count
+#SBATCH --ntasks=20              # total number of tasks across all nodes
+#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)
+#SBATCH --time=00:01:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+#SBATCH --mail-user=<email>
+module load python/3.9.5
+python distributed.py
+
+

Put the job in the queue:

+
sbatch job_distributed.slurm
+
+

Interactive example

+
salloc -A <proj> -p node -N 1 -n 10 -t 1:0:0
+python distributed.py
+
+

MPI

+

Presently, you have to install your own mpi4py. You will need to activate paths to the MPI libraries. Therefore, follow these steps:

+
    +
1. If you use Python 3.10.8:
  2. +
+
module load gcc/12.2.0 openmpi/4.1.4
+
+
 Otherwise:
+
+
module load gcc/9.3.0 openmpi/3.1.5
+
+
    +
2. pip install locally or in a virtual environment
  2. +
+
pip install --user mpi4py
+
+
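
To quickly verify the installation, a minimal sketch (hello_mpi.py is a hypothetical file name; run it with, for example, mpirun -n 2 python hello_mpi.py):

+
from mpi4py import MPI
+
+comm = MPI.COMM_WORLD
+# Each MPI process (rank) prints its own line
+print('Hello from rank', comm.Get_rank(), 'of', comm.Get_size())
+
+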

Remember that you will also have to load the openmpi module before running mpi4py code, so that the MPI header files can be found (e.g. with the command "module load gcc/X.X.X openmpi/X.X.X"). Because of how MPI works, we need to explicitly write our code into a file, pythonMPI.py:

+
import random
+import time
+from mpi4py import MPI
+def sample(n):
+    """Make n trials of points in the square.
+    Return (n, number_in_circle)
+    This is our basic function.
+    By design, it returns everything it needs to compute
+    the final answer: both n (even though it is an input
+    argument) and n_inside_circle.
+    To compute our final answer, all we have to do is
+    sum up the n:s and the n_inside_circle:s and do our
+    computation"""
+    n_inside_circle = 0
+    for i in range(n):
+        x = random.random()
+        y = random.random()
+        if x ** 2 + y ** 2 < 1.0:
+            n_inside_circle += 1
+    return n, n_inside_circle
+comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()
+n = 10 ** 7
+if size > 1:
+    n_task = int(n / size)
+else:
+    n_task = n
+t0 = time.perf_counter()
+_, n_inside_circle = sample(n_task)
+t = time.perf_counter() - t0
+
+print(f"before gather: rank {rank}, n_inside_circle: {n_inside_circle}")
+n_inside_circle = comm.gather(n_inside_circle, root=0)
+print(f"after gather: rank {rank}, n_inside_circle: {n_inside_circle}")
+if rank == 0:
+    pi_estimate = 4.0 * sum(n_inside_circle) / n
+    print(f"\nnumber of darts: {n}, estimate: {pi_estimate},
+        time spent: {t:.2} seconds")
+
+

You can execute your code the normal way as

+
mpirun -n 3 python pythonMPI.py
+
+

A batch script, job_MPI.slurm, should include a "module load gcc/9.3.0 openmpi/3.1.5"

+
#!/bin/bash
+#SBATCH -A <proj>
+#SBATCH -p devel
+#SBATCH --job-name=MPI_py        # create a short name for your job
+#SBATCH --nodes=1                # node count
+#SBATCH --ntasks=20              # total number of tasks across all nodes
+#SBATCH --cpus-per-task=1        # cpu-cores per task (>1 if multi-threaded tasks)
+#SBATCH --time=00:05:00          # total run time limit (HH:MM:SS)
+#SBATCH --mail-type=begin        # send email when job begins
+#SBATCH --mail-type=end          # send email when job ends
+#SBATCH --mail-user=<email>
+module load python/3.9.5
+module load gcc/9.3.0 openmpi/3.1.5
+mpirun -n 20 python pythonMPI.py
+
+

Using the GPU nodes

+

Example with numba. First install numba locally:

+
pip install --user numba
+
+

Test script: add-list.py

+
import numpy as np
+from timeit import default_timer as timer
+from numba import vectorize
+# This should be a sufficiently large value.
+NUM_ELEMENTS = 100000000
+# This is the CPU version.
+def vector_add_cpu(a, b):
+  c = np.zeros(NUM_ELEMENTS, dtype=np.float32)
+  for i in range(NUM_ELEMENTS):
+      c[i] = a[i] + b[i]
+  return c
+# This is the GPU version. Note the @vectorize decorator. This tells
+# numba to turn this into a GPU vectorized function.
+@vectorize(["float32(float32, float32)"], target='cuda')
+def vector_add_gpu(a, b):
+  return a + b
+def main():
+  a_source = np.ones(NUM_ELEMENTS, dtype=np.float32)
+  b_source = np.ones(NUM_ELEMENTS, dtype=np.float32)
+  # Time the CPU function
+  start = timer()
+  vector_add_cpu(a_source, b_source)
+  vector_add_cpu_time = timer() - start
+  # Time the GPU function
+  start = timer()
+  vector_add_gpu(a_source, b_source)
+  vector_add_gpu_time = timer() - start
+  # Report times
+  print("CPU function took %f seconds." % vector_add_cpu_time)
+  print("GPU function took %f seconds." % vector_add_gpu_time)
+  return 0
+if __name__ == "__main__":
+  main()
+
+

Run in an interactive session with GPUs on Snowy

+
[bjornc@rackham3 ~]$ interactive -A staff -n 1 -M snowy --gres=gpu:1  -t 1:00:01 --mail-type=BEGIN --mail-user=bjorn.claremar@uppmax.uu.se
+You receive the high interactive priority.
+Please, use no more than 8 GB of RAM.
+Waiting for job 6907137 to start...
+Starting job now -- you waited for 90 seconds.
+[bjornc@s160 ~]$ ml python/3.9.5
+[bjornc@s160 ~]$ python add-list.py  #run the script
+CPU function took 36.849201 seconds.
+GPU function took 1.574953 seconds.
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_programming/index.html b/software/python_programming/index.html new file mode 100644 index 000000000..306230634 --- /dev/null +++ b/software/python_programming/index.html @@ -0,0 +1,3277 @@ + + + + + + + + + + + + + + + + + + + Python programming - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Python programming

+

This page describes how to program in Python +on the UPPMAX clusters.

+

There are multiple ways to program in Python:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
DescriptionFeaturesScreenshot
Use a text editor (see below)Non-interactive, no helpUsing GNU nano for Python
Use the Python interpreter (see below)Interactive, terminal-based, some helpUsing the Python interpreter
Use IPythonInteractive, terminal-based, more help and featuresUsing IPython
Use JupyterInteractive, web-basedUsing Jupyter
Use Visual Studio CodeInteractive, install on local computer, use locally installed Python and Python packagesUsing VSCode
+

Use a text editor

+

Using a text editor to program in Python +is a simple way to write code: +it is the same as writing any text file.

+

Here we use the text editor GNU nano to write a Python script:

+
nano example_script.py
+
+

Within nano, write:

+
print('Hello, world!')
+
+
    +
• To save, press CTRL + O (i.e. the letter O, not zero), then press Enter to keep the same filename
  • +
• To quit, press CTRL + X
  • +
+

You can run this Python script in the shell by:

+
python example_script.py
+
+

or, if you want to explicitly use Python 3:

+
python3 example_script.py
+
+

Some features of this approach are:

+
    +
  • this is a simple way to write code: it is the same as writing any text file.
  • +
  • you get no help while writing code
  • +
  • you can only run the script from start to finish, i.e. you cannot + partially run the script
  • +
+
+How to run a Python script line-by-line? +

You can run a Python script line-by-line using a Python debugger, +such as pdb.

+

On the terminal, for python, do:

+
pdb example_script.py
+
+

or for python3:

+
pdb3 example_script.py
+
+

See the official Python documentation of pdb here.

+
+

Use the Python interpreter

+

After loading a Python module, you have the Python interpreter available.

+
+Forgot how to load a Python module? +

See the UPPMAX page about Python here.

+
+
+What is a Python interpreter? +

In computing, an interpreter is a program that reads text +and runs it directly, without any additional steps.

+

The Python interpreter runs the Python commands you type directly, +without any additional steps.

+
+

Start the Python interpreter by typing:

+
python
+
+

or (for explicit Python 3):

+
python3
+
+

The Python prompt looks like this:

+
>>>
+
+

Type, for example:

+
print('Hello, world!')
+
+

and the interpreter will run the statement.

+

Exit the Python interpreter with CTRL + D, quit() or exit().

+

The Python interpreter gives limited auto-complete while writing code

+
+How do I get auto-complete? +

As an example, writing this line of code in the Python interpreter ...

+
s = 'Hello, world!'
+
+

... and press enter. Now a variable called s will hold some text.

+

Now type ...

+
s.
+
+

and press Tab twice. You will see a list of things you can do with that string.

+
+

The Python interpreter can show graphics.

+
+How do I get the Python interpreter to show graphics? +

In the Python interpreter, run this code line-by-line:

+
import matplotlib.pyplot as plt
+plt.plot([1, 4, 9, 16])
+plt.show()
+
+

(or as a one-liner: import matplotlib.pyplot as plt; plt.plot([1, 4, 9, 16]); plt.show())

+

You will see a window appear:

+

A window with the plot

+

You will only see a window appear if you have logged in to Rackham using SSH with X forwarding enabled.

+

Spoiler: ssh -X sven@rackham.uppmax.uu.se.

+
+

The Python interpreter cannot directly run scripts; a script is run from the shell instead (see the sketch below).

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_pyenv/index.html b/software/python_pyenv/index.html new file mode 100644 index 000000000..762562912 --- /dev/null +++ b/software/python_pyenv/index.html @@ -0,0 +1,3208 @@ + + + + + + + + + + + + + + + + + + + Python pyenv - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Python pyenv

+

pyenv is one of multiple +Python virtual environment managers.

+

This approach is more advanced and should, in our opinion, be used only if the other virtual environment managers are not enough for your purpose. Probably Conda will work well for you. The approach below allows you to install your own Python version and much more…

+

Confer the official pyenv documentation.

+

First time at UPPMAX

+
    +
  1. +

    Download pyenv:

    +
git clone https://github.com/pyenv/pyenv.git ~/.pyenv
    +
    +
  2. +
  3. +

    Make pyenv start when you login each time

    +
  4. +
+
echo 'export PYENV_ROOT="$HOME/.pyenv"' >> ~/.bash_profile
+echo 'export PATH="$PYENV_ROOT/bin:$PATH"' >> ~/.bash_profile
+echo 'eval "$(pyenv init -)"' >> ~/.bash_profile
+
+

To make sure everything gets loaded correctly, log out and back in to UPPMAX.

+

Installing own python version (not already available as an UPPMAX module)

+
    +
  1. +

    Get pyenv to install the python version of your liking.

    +
    pyenv install 3.10.6
    +
    +
  2. +
  3. +

Make the version you just installed the standard version for every time you run Python.

    +
    pyenv global 3.10.6
    +
    +
  4. +
+

Now you should be all set. If you change your mind about which version of Python to use, just redo this section and choose a different version. You can also have multiple versions installed at the same time and just switch between them using 'pyenv global' as shown above, e.g. if you have a script that requires Python 3.3 or any other version.

+

Install packages in your selected python version

+
    +
  1. +

    Set python version with

    +
    pyenv global <version>
    +
    +
  2. +
  3. +

Install packages in your Python version using pip

    +
    pip install [package name]
    +
    +
  4. +
+

Example:

+
pip install mechanize
+
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_venv/index.html b/software/python_venv/index.html new file mode 100644 index 000000000..57c39c1c4 --- /dev/null +++ b/software/python_venv/index.html @@ -0,0 +1,3291 @@ + + + + + + + + + + + + + + + + + + + Python venv - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Python venv

+

venv is one of multiple +Python virtual environment managers.

+

venv is a Python-only environment manager +and is an official Python library, +with its own official Python tutorial.

+
flowchart TD
+  create[Create]
+  activate[Activate]
+  use[Use]
+  deactivate[Deactivate]
+
+  create --> activate
+  activate --> use
+  use --> deactivate
+  deactivate --> activate
+
+

The venv workflow

+
+

First, the common workflow for using a venv is described:

+ +

Then:

+ +

Create a virtual environment

+

A virtual environment can be created in multiple ways, +for example, from scratch, which is not recommended.

+

Here we discuss the recommended way to create a virtual environment, +which has these steps:

+
    +
1. Load a Python module or a module with Python packages
  2. +
  3. Create the virtual environment
  4. +
+

1. Load a Python module or a module with Python packages

+

The first step is described at +'Loading Python' +and +'Loading Python package modules'.

+
+Just show me how to do this +

Sure, here is how to load a Python module:

+
module load python/3.11.8
+
+

Here is how to load a Python package module:

+
module load python_ML_packages/3.11.8-cpu
+
+
+

Because you can load Python modules of different Python versions, +you can create venv virtual environments with different Python versions. +Consider adding this in the venv name, e.g. my_python2_venv or my_python3_venv.

+

2. Create the virtual environment

+

After loading the needed Python modules, +one can create a virtual environment +most efficiently using:

+
python -m venv --system-site-packages [path]/[venv_name]
+
+

where [path] is the path where you want to create your venv virtual +environment and [venv_name] is the name of the venv virtual environment. +For example python -m venv --system-site-packages ~/my_venvs/example_venv.

+
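
As an aside, the same can be done from within Python itself, since venv is a standard-library module. A minimal sketch (the path is the example from above):

+
import venv
+
+# Equivalent of 'python -m venv --system-site-packages [path]/[venv_name]'
+builder = venv.EnvBuilder(system_site_packages=True, with_pip=True)
+builder.create('my_venvs/example_venv')  # creates the venv at this example path
+
+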
+

Create virtual environments in your project storage

+

Virtual environments can take up a lot of disk space.

+

If you use either (1) many venv virtual environments, +or (2) install many Python packages to a venv virtual environment, +we strongly recommend that you create the venv +virtual environments in your project (/proj/[your_uppmax_project]) folder.

+
+

The -m flag makes sure that you use the libraries from the Python version you are using. The --system-site-packages flag ensures you use the packages already installed in the loaded Python module.

+
+How long does this step take? +

This depends.

+

This takes around 10 seconds:

+
module load python/3.11.8
+python -m venv --system-site-packages ~/my_venvs/example_venv
+
+

This takes around 10 seconds:

+
module load python_ML_packages/3.11.8-cpu
+python -m venv --system-site-packages ~/my_venvs/example_ml_venv
+
+
+

Activate a virtual environment

+

To activate your newly created virtual environment, locate the script called activate and execute it:

+
source [path]/[venv_name]/bin/activate
+
+

where [path] is the path where you created your venv virtual environment and [venv_name] is the name of the venv virtual environment. For example source ~/my_venvs/example_venv/bin/activate.

+

When a venv virtual environment is active, +the prompt is changed to start with the name of your venv.

+
+What does that look like? +

This is how your changed prompt looks:

+
[sven@rackham1 ~]$ module load python_ML_packages/3.11.8-cpu
+[sven@rackham1 ~]$ python -m venv --system-site-packages ~/my_venvs/example_venv
+[sven@rackham1 ~]$ source ~/my_venvs/example_venv/bin/activate
+(example_venv) [sven@rackham1 ~]$
+
+
+

With the venv virtual environment active, +you can now install and update Python packages +in an isolated way.

+

Deactivate a virtual environment

+

To deactivate a venv virtual environment:

+
deactivate
+
+

As the venv virtual environment you just used is now inactive, +the prompt will not show the name of your venv anymore.

+

You will need to activate a virtual environment +to work with it again.

+

Export and import a virtual environment

+

Export

+

To export the Python packages used in your virtual environment, do:

+
pip freeze > requirements.txt
+
+

This will create a file with all the Python packages and their versions, +using the conventional name for such a file.

+
+What does that file look like? +

This is how a requirements.txt file may look:

+
anndata==0.10.5.post1
+anyio==4.2.0
+appdirs==1.4.4
+argon2-cffi==23.1.0
+argon2-cffi-bindings==21.2.0
+[more Python packages]
+websocket-client==1.7.0
+Werkzeug==3.0.1
+widgetsnbextension==4.0.9
+zipp==3.17.0
+zope.interface==6.1
+
+

Note that [more Python packages] is a placeholder for many +more Python packages.

+
+

Import

To install all the Python packages listed in a requirements.txt file, do:

+
pip install -r requirements.txt
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_virtual_environments/index.html b/software/python_virtual_environments/index.html new file mode 100644 index 000000000..161ffa2f6 --- /dev/null +++ b/software/python_virtual_environments/index.html @@ -0,0 +1,3199 @@ + + + + + + + + + + + + + + + + + + + Virtual environments in Python - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Virtual environments in Python

+

This page describes how to use virtual environments in Python.

+

Why use virtual environments?

+

Virtual environments allow one to have independent Python environments.

+

This allows one to have multiple projects with different requirements:

+
    +
  • You can install specific, also older, versions into them
  • +
• You can create one for each project, so it is no problem if two projects require different versions
  • +
  • If you make some mistake and install something you did not want or need, you + can remove the environment and create a new one
  • +
+

Environment managers

+

Here is an incomplete overview of virtual environment managers that work with Python:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Virtual environment managerDescription
venvWorks on Rackham
virtualenvvenv for older Python versions
condaWorks on Rackham, recommended on Bianca
pyenvMore advanced than venv
+

General virtual environment manager workflow

+
flowchart TD
+  create[Create]
+  activate[Activate]
+  use[Use]
+  deactivate[Deactivate]
+
+  create --> activate
+  activate --> use
+  use --> deactivate
+  deactivate --> activate
+

Whatever virtual environment manager you use, this is the workflow:

+
    +
  • You create the isolated environment
  • +
  • You activate the environment
  • +
  • You work in the isolated environment. + Here you install (or update) the environment with the packages you need
  • +
  • You deactivate the environment after use
  • +
+

A virtual environment can be created in multiple ways, +for example, from scratch. +However, there are more efficient ways, +such as by re-using already installed Python packages. +How to do so, can be found on the page about your specific virtual environment manager.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/python_virtualenv/index.html b/software/python_virtualenv/index.html new file mode 100644 index 000000000..7f09a9153 --- /dev/null +++ b/software/python_virtualenv/index.html @@ -0,0 +1,3128 @@ + + + + + + + + + + + + + + + + + + + Python virtualenv - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Python virtualenv

+

virtualenv is one of multiple +Python virtual environment managers.

+

Here we show the differences between venv and virtualenv:

+ + + + + + + + + + + + + + + + + + + + +
Parametervenvvirtualenv
Supports which Python versions?NewerOlder
Is standard library?YesNo
+

Also, virtualenv has a few more minor unique features.

+

Because these two are so similar, +most information is documented at venv.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/pytorch/index.html b/software/pytorch/index.html new file mode 100644 index 000000000..d6bf1338c --- /dev/null +++ b/software/pytorch/index.html @@ -0,0 +1,3101 @@ + + + + + + + + + + + + + + + + + + + PyTorch - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

PyTorch

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/qiime2/index.html b/software/qiime2/index.html new file mode 100644 index 000000000..875040f3a --- /dev/null +++ b/software/qiime2/index.html @@ -0,0 +1,3222 @@ + + + + + + + + + + + + + + + + + + + qiime2 - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

qiime2

+

qiime2 is a tool for microbiome analysis.

+

qiime2 can be found among the UPPMAX modules.

+
module spider qiime2
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham3 ~]$ module spider qiime2
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  qiime2:
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+     Versions:
+        qiime2/2018.11.0
+        qiime2/2024.2
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  For detailed information about a specific "qiime2" package (including how to load the modules) use the module's full name.
+  Note that names that have a trailing (E) are extensions provided by other modules.
+  For example:
+
+     $ module spider qiime2/2024.2
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+
+

To find out how to load a specific version:

+
module spider qiime2/2024.2
+
+
+What does that look like? +

Output will look similar to:

+
[sven@rackham3 ~]$ module spider qiime2/2024.2
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+qiime2: qiime2/2024.2
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+    You will need to load all module(s) on any one of the lines below before the "qiime2/2024.2" module is available to load.
+
+      bioinfo-tools
+
+    Help:
+      qiime2 - use qiime2 
+
+      Description
+
+      Version 2024.2
+
+      https://qiime2.org
+
+      The version installed is 2024.2 amplicon, slightly modified from the publicly available docker image.
+
+
+         qiime ...
+
+
+      You may see a message like 
+
+          Matplotlib created a temporary config/cache directory at /scratch/matplotlib-a10b2an0 because the default path (/home/qiime2/matplotlib) is not a writable directory...
+
+      This is because qiime2 is running within an Apptainer container. This message can be ignored.
+
+
+

After reading that documentation, we know how to load it:

+
module load bioinfo-tools 
+module load qiime2/2024.2
+
+
+What does that look like? +

Your output will look similar to this:

+
[sven@rackham3 ~]$ module load bioinfo-tools 
+[sven@rackham3 ~]$ module load qiime2/2024.2
+[sven@rackham3 ~]$ 
+
+
+

Singularity script

+

If you want to put qiime2 in a Singularity container, +here is an example script:

+
BootStrap: library
+From: centos:7
+
+%runscript
+  . /miniconda/etc/profile.d/conda.sh
+  PATH=$PATH:/miniconda/bin
+  conda activate qiime2-2019.7
+  qiime "$@"
+
+%post
+  yum clean all
+  yum -y update
+  yum -y install wget python-devel
+  cd /tmp
+  wget https://repo.anaconda.com/miniconda/Miniconda2-latest-Linux-x86_64.sh
+  bash ./Miniconda2-latest-Linux-x86_64.sh -b -p /miniconda
+  /miniconda/bin/conda update -y conda
+  wget https://data.qiime2.org/distro/core/qiime2-2019.7-py36-linux-conda.yml
+  /miniconda/bin/conda env create -n qiime2-2019.7 --file qiime2-2019.7-py36-linux-conda.yml
+  # OPTIONAL CLEANUP
+  rm qiime2-2019.7-py36-linux-conda.yml
+  /miniconda/bin/conda clean -a
+
+

See the documentation on Singularity for how to do so.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/r/index.html b/software/r/index.html new file mode 100644 index 000000000..adafe745f --- /dev/null +++ b/software/r/index.html @@ -0,0 +1,3587 @@ + + + + + + + + + + + + + + + + + + + + + + + R - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

R

+

The R logo, from https://www.r-project.org/logo/

+

R is a programming language for statistical computing and data visualization +(from Wikipedia).

+

Here we discuss:

+ +
flowchart TD
+
+    subgraph r[R]
+      r_interpreter[the R interpreter]
+      r_packages[R packages]
+      r_language[the R programming language]
+      r_dev[R software development]
+      rstudio[RStudio]
+
+      interpreted_language[Interpreted]
+      cran[CRAN]
+    end
+
+    subgraph uppmax_modules[UPPMAX modules]
+      r_module[R]
+      r_packages_module[R_packages]
+      rstudio_module[RStudio]
+    end
+
+
+    r_language --> |has| r_dev
+    r_language --> |is| interpreted_language
+    r_language --> |uses| r_packages
+    interpreted_language --> |done by| r_interpreter
+    r_packages --> |maintained by| cran
+    r_dev --> |commonly done in| rstudio
+
+    r_interpreter --> r_module
+    r_packages --> r_packages_module
+    rstudio --> rstudio_module
+
+    rstudio_module --> |automatically loads latest| r_packages_module
+    r_packages_module --> |automatically loads corresponding version of| r_module
+

the R programming language

+

R is 'a programming language for statistical computing and data visualization' and is one of the most commonly used programming languages in data mining, analysis and visualization.

+

R is an interpreted language; users can access it through the R interpreter.

+

R is a dynamically typed programming language. Its basic built-in data structures include (among others) vectors, arrays, lists, and data frames, and it supports both procedural programming and object-oriented programming.

+

R has many user-created R packages +to augment the functions of the R language, +most commonly hosted on CRAN. +These packages offer statistical techniques, +graphical devices, import/export, reporting (RMarkdown, knitr, Sweave), etc.

+

the R interpreter

+

The R interpreter is the program that reads R code and runs it. Commonly, 'the programming language R' and 'the R interpreter' are used as synonyms.

+

To load the latest version of the R interpreter, +load the R module version 4.3.1 like this:

+
module load R/4.3.1
+
+
+Do I really need to load an R module? +

We strongly recommend loading an R module.

+

If you do not load an R module, you will be using the version of +R used by the UPPMAX systems.

+

Sometimes that may work.

+

If not, load an R module.

+
+
+Need a different version? +

If you need a different R version, +use the following command +to see which versions of the R interpreter +are installed on UPPMAX:

+
module spider R
+
+
+

Then start the R interpreter with:

+
R
+
+

R packages

+

R packages extend what R can do. The most common repository for R packages is CRAN. As these packages are so common, UPPMAX provides most CRAN packages in one module, called R_packages.

+

To load the latest version of the pre-installed R packages, do:

+
module load R_packages/4.3.1
+
+

This will automatically load the corresponding version of the R interpreter.

+
+Do I really need to load the R_packages module? +

We strongly recommend loading the R_packages module.

+

If you do not load the R_packages module (nor the R module), +you will be using the version of R used by the UPPMAX systems.

+

Sometimes that may work.

+

If not, load the R_packages module.

+
+
+Need a different version? +

If you need a different package version, +use the following command +to see which versions of the R packages +are installed on UPPMAX:

+
module spider R_packages
+
+
+

R software development

+

RStudio in action on Bianca using the remote desktop environment

+
+


+
+

Software development is commonly done in a so-called Integrated Development Environment, abbreviated 'IDE'.

+

RStudio is the most commonly used IDE for R software development. See the UPPMAX page about RStudio on how to use it.

+

How to install personal packages

+
+

Installing R packages on Bianca

+ +
+

First load R_packages and check that the package is not already installed!

+

To install personal packages in your own home directory you type

+
install.packages("package_name")
+
+

as usual. That will install all your packages under the path ~/R/[arch]/[version of R]/. +Then you can load it by just doing library(package_name) +or require(package_name) in the R environment.

+

You can also specify a specific folder for where to put your packages, with

+
install.packages("package_name", lib="~/some/path/under/your/home/directory/")
+
+

But to then be able to find the package inside the R environment +you need to either export the R_LIBS_USER environment variable, +or specify the flag lib.loc when calling require/library, e.g.

+
library(package_name, lib.loc='~/some/path/under/your/home/directory')
+
+

Notice that if you are planning on running R on different clusters +then it is probably wisest to manually specify the installation directory, +and to have separate directories for each cluster. +This is because some of the clusters have different architectures, +and this will render some packages unusable +if you compile them on one system but try to run them on the other.

+

Technicalities

+

As of this writing, our most recent installations are

+
    +
  • R/4.3.1
  • +
  • R_packages/4.3.1
  • +
  • RStudio/2023.06.2-561
  • +
+

If you need an older version, do module avail R or R_packages or RStudio to see older versions as well.

+

Note that R_packages/4.3.1 contains 23475 packages, nearly all packages available on CRAN and BioConductor, as well as several custom packages installed from Github and other repositories. See module help R_packages/4.3.1 and R_packages for more information.

+

What R packages are in the omnibus R_packages modules?

+

R_PACKAGES/4.1.1

+

As of 2021-11-11 there are a total of 21659 R packages installed. A total of 21740 packages are available in CRAN and BioConductor. 18022 CRAN packages are installed, out of 18348 available. 3382 BioConductor-specific packages are installed, out of 3392 available. 255 other R packages are installed. These are not in CRAN/BioConductor, and instead are hosted on github or elsewhere.

+

These R packages are available as part of the R_packages/4.1.1 module as installed on rackham, bianca and snowy, which requires and loads the R/4.1.1 module. When the R_packages/4.1.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+
    +
• To use some R packages from this module, other modules may need to be loaded. For example, to use the Rmpi package, the openmpi/3.1.5 module must be loaded after loading R_packages/4.1.1.
  • +
  • See module help R_packages/4.1.1 for more information.
  • +
+

R_PACKAGES/4.0.4

+

As of 2021-04-16 there are a total of 20663 CRAN and BioConductor packages installed, out of 20751 packages available. 17354 CRAN packages are installed, out of 17428 available. 3309 BioConductor-specific packages are installed, out of 3323 available.

+

These R packages are available as part of the R_packages/4.0.4 module as installed on rackham, bianca and snowy, which requires and loads the R/4.0.4 module. When the R_packages/4.0.4 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+
    +
  • To use some R packages from this module, other modules may need to be loaded. For example, to use the Rmpi package, the openmpi/3.1.5 module must be loaded after loading R_packages/4.0.4.
  • +
  • See module help R_packages/4.0.4 for more information.
  • +
+

R_PACKAGES/4.0.0

+

As of 2021-02-24 there are a total of 18652 CRAN and BioConductor packages installed, out of 20422 packages available. 14839 CRAN packages are installed, out of 17165 available. 3217 BioConductor-specific packages are installed, out of 3257 available.

+

These R packages are available as part of the R_packages/4.0.0 module as installed on rackham, bianca and snowy, which requires and loads the R/4.0.0 module. When the R_packages/4.0.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

See module help R_packages/4.0.0 for more information.

+

R_PACKAGES/3.6.1

+

As of 2019-09-18 there are a total of 17657 packages available in this module. This includes 14579 CRAN packages installed, out of 14913 available; and 3054 BioConductor-specific packages installed, out of 3079 available. These R packages are available as part of the R_packages/3.6.1 module as installed on rackham, bianca and snowy, which requires and loads the R/3.6.1 module. When the R_packages/3.6.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

See module help R_packages/3.6.1 for more information.

+

R_PACKAGES/3.6.0

+

As of 2019-05-14 there are a total of 17257 packages available. This includes 13769 CRAN packages installed, out of 14178 available; and 3031 BioConductor-specific packages installed, out of 3079 available. These R packages are available as part of the R_packages/3.6.0 module as installed on rackham, bianca and snowy, which requires and loads the R/3.6.0 module. When the R_packages/3.6.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

See module help R_packages/3.6.0 for more information.

+

R_PACKAGES/3.5.2

+

As of 2019-02-08 there are a total of 16642 packages available. This includes 13355 CRAN packages installed, out of 13683 available; and 2933 BioConductor-specific packages installed, out of 2959 available. These R packages are available as part of the R_packages/3.5.2 module as installed on rackham, bianca and snowy, which requires and loads the R/3.5.2 module. When the R_packages/3.5.2 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

See module help R_packages/3.5.2 for more information.

+

R_PACKAGES/3.5.0

+

With its 3.5.0 version, R_packages now attempts to install all available R packages from both CRAN and BioConductor.

+

As of 2018-06-26 there are a total of 14532 packages available. This includes 11734 CRAN packages installed, out of 12867 available; and 2798 BioConductor-specific packages installed, out of 2843 available. These R packages are available as part of the R_packages/3.5.0 module as installed on rackham, bianca and snowy, which requires and loads the R/3.5.0 module. When the R_packages/3.5.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

See module help R_packages/3.5.0 for more information.

+

R_packages/3.4.3

+

A large number of R packages are available as part of the R_packages/3.4.3 module as installed on rackham and bianca, which requires and loads the R/3.4.3 module. When the R_packages/3.4.3 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

R_packages/3.4.0

+

A large number of R packages are available as part of the R_packages/3.4.0 module, which requires and loads the R/3.4.0 module. When the R_packages/3.4.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

R_packages/3.3.2

+

A large number of R packages are available as part of the R_packages/3.3.2 module, which requires and loads the R/3.3.2 module. When the R_packages/3.3.2 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these packages will be available via library(package-name).

+

R_packages/3.3.1

+

A large number of R packages are available as part of the R_packages/3.3.1 module, which requires and loads the R/3.3.1 module. When the R_packages/3.3.1 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these should be available via library(package-name).

+

R_packages/3.3.0

+

A large number of R packages are available as part of the R_packages/3.3.0 module, which requires and loads the R/3.3.0 module. When the R_packages/3.3.0 module is loaded, it adds a directory to the R_LIBS_SITE environment variable. Within R, these should be available via library(package-name).

+

Learning R

+

Starter R courses

+

The Carpentries teaches basic lab skills for research computing, such as:

+ +

Experienced R courses

+

CodeRefinery develops and maintains training material +on software best practices for researchers that already write code. +Their material addresses all academic disciplines and tries to be as programming language-independent as possible:

+ +

Aalto Scientific Computing:

+ +

Overview of NAISS centers and their documentation about R

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/r_packages_bianca/index.html b/software/r_packages_bianca/index.html new file mode 100644 index 000000000..3dd449406 --- /dev/null +++ b/software/r_packages_bianca/index.html @@ -0,0 +1,3347 @@ + + + + + + + + + + + + + + + + + + + Installing R packages on Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Installing R packages on Bianca

+
+Read through the content below +
+
+Try to do the exercise +
+

First check if the package is already in R_packages/x.y.z

+
    +
  • On UPPMAX the module R_packages is an omnibus package library containing almost all packages in the CRAN and BioConductor repositories.
      +
    • As of 2023-11-21, there were a total of 23478 R packages installed in R_packages/4.3.1.
        +
      • A total of 23603 packages are available in CRAN and BioConductor
      • +
      • 19586 CRAN packages are installed, out of 20044 available
      • +
      • 3544 BioConductor-specific packages are installed, out of 3559 available
      • +
      • 346 other R packages are installed. These are not in CRAN/BioConductor, are only available in the CRAN/BioConductor archives, or are hosted on github, gitlab or elsewhere
      • +
      +
    • +
    +
  • +
+

Chances are good the R packages you need are already available once you load this module. You can quickly check by loading it:

+

$ ml R_packages/4.3.1

+

Then within R, try loading the package you want:

+

library(glmnet)

+

Alternatively, and this is both a longer solution and not our recommended one, you can grep for the package after this module is loaded using the environment variable $R_LIBS_SITE, which contains the locations of all R packages installed within the module.

+
$ ls -l $R_LIBS_SITE | grep glmnet
+drwxrwsr-x  9 douglas sw  4096 May 28 16:59 EBglmnet
+drwxrwsr-x 11 douglas sw  4096 May 25 01:22 glmnet
+drwxrwsr-x  6 douglas sw  4096 May 25 04:03 glmnetSE
+drwxrwsr-x  7 douglas sw  4096 May 25 04:04 glmnetUtils
+drwxrwsr-x  8 douglas sw  4096 May 25 04:04 glmnetcr
+drwxrwsr-x  7 douglas sw  4096 May 25 10:46 glmnetr
+
+

Install steps

+

Install on Rackham

+
    +
  • R on UPPMAX course
  • +
• First decide which R version it should be based on, and load that R_packages module.
  • +
  • If not stated otherwise, your installation will end up in the ~/R directory within your home directory
  • +
+

Methods

+ +

Transfer to wharf

+
    +
  • You may transfer the whole R library (in you home folder)
      +
    • this is usually the easiest way
    • +
    +
  • +
  • or select the directory(-ies) related to you new installation
      +
    • note there may be more than one directory
    • +
    +
  • +
+

Move package to local Bianca R package path

+
    +
  • Sync or move the R directory or the specific folders to your ~/R directory on bianca
  • +
+

Test your installation

+
    +
  • Start an R session on bianca and load the new package
  • +
+

Example: Update dowser

+

dowser on ReadTheDocs

+
+

Info

+
    +
  • Dowser is part of the Immcantation analysis framework for Adaptive Immune Receptor Repertoire sequencing (AIRR-seq).
  • +
  • Dowser provides a set of tools for performing phylogenetic analysis on B cell receptor repertoires.
  • +
  • It supports building and visualizing trees using multiple methods, and implements statistical tests for discrete trait analysis of B cell migration, differentiation, and isotype switching.
  • +
+
+

The version of dowser in R_packages/4.2.1 is 1.1.0. It was updated to version 1.2.0 on 2023-05-30.

+

Install dowser on Rackham

+

You can update this for yourself by beginning on rackham. Do

+
module load R_packages/4.2.1
+
+

and then, within R, do

+
install.packages('dowser')
+
+

The install.packages() command that you use to install new packages is also used to update already installed packages.

+

As the update begins, you will see two questions, answer yes to both:

+
Warning in install.packages("dowser") :
+      'lib = "/sw/apps/R_packages/4.2.1/rackham"' is not writable
+    Would you like to use a personal library instead? (yes/No/cancel) yes
+
+

and

+
Would you like to create a personal library
+    '~/R/x86_64-pc-linux-gnu-library/4.2'
+    to install packages into? (yes/No/cancel) yes
+
+

If you have already installed or updated an R package with R_packages/4.2.1 loaded that resulted in creating a personal library, you may not see one or both of these questions.

+

This will then lead to a brief installation process. This creates the directory ~/R/x86_64-pc-linux-gnu-library/4.2 that it refers to in the question. This directory contains your personal installations and updates of R packages.

+

The complete installation output for this update on rackham was:

+
> packageVersion('dowser')
+[1] '1.1.0'
+> install.packages('dowser')
+Installing package into '/sw/apps/R_packages/4.2.1/rackham'
+(as 'lib' is unspecified)
+Warning in install.packages("dowser") :
+  'lib = "/sw/apps/R_packages/4.2.1/rackham"' is not writable
+Would you like to use a personal library instead? (yes/No/cancel) yes
+Would you like to create a personal library
+'/domus/h1/douglas/R/x86_64-pc-linux-gnu-library/4.2'
+to install packages into? (yes/No/cancel) yes
+--- Please select a CRAN mirror for use in this session ---
+trying URL 'https://ftp.acc.umu.se/mirror/CRAN/src/contrib/dowser_1.2.0.tar.gz'
+Content type 'application/x-gzip' length 1722229 bytes (1.6 MB)
+==================================================
+downloaded 1.6 MB
+
+* installing *source* package 'dowser' ...
+** package 'dowser' successfully unpacked and MD5 sums checked
+** using staged installation
+** R
+** data
+*** moving datasets to lazyload DB
+** inst
+** byte-compile and prepare package for lazy loading
+** help
+*** installing help indices
+** building package indices
+** installing vignettes
+** testing if installed package can be loaded from temporary location
+** testing if installed package can be loaded from final location
+** testing if installed package keeps a record of temporary installation path
+* DONE (dowser)
+
+The downloaded source packages are in
+    '/scratch/RtmpRo0Gz5/downloaded_packages'
+>
+> packageVersion('dowser')
+[1] '1.2.0'
+
+

Transfer to the Wharf

+

After installation, the next step is to copy the contents of this directory over to bianca so that it is the same directory within your bianca home directory.

+

Make sure you are in your home directory. Then connect to the bianca wharf. Replace the name and project with your bianca user name and project.

+
sftp douglas-sens2017625@bianca-sftp
+
+

You log in here like you log into bianca: the first password is your password followed by the 6-digit authenticator code, the second password (if required for you) is only your password.

+

Once sftp has connected, the contents of the current directory can be listed with

+
dir
+
+

It should look like this:

+
sftp> dir
+douglas-sens2017625
+
+

Now cd to this directory, which is your wharf directory within your project.

+
sftp> cd douglas-sens2017625/
+sftp> dir
+sftp>
+
+

If you have not uploaded anything to your wharf, this will be empty; otherwise, it may have a few things in it.

+

Now, upload your entire personal R directory from rackham here.

+
sftp> put -r R
+
+

This will take a while to upload all the files. When it has completed, quit.

+
sftp> quit
+
+
    +
  • Now, log into bianca using the shell, or using the web interface and start a terminal.
  • +
  • Once you have a bianca shell, change to your wharf directory within your project. Replace my user and project with yours.
  • +
+
cd /proj/sens2017625/nobackup/wharf/douglas/douglas-sens2017625
+
+

Within this directory should be your R directory.

+
[douglas@sens2017625-bianca douglas-sens2017625]$ ls -l
+total 1892
+drwxrwxr-x  3 douglas douglas    4096 Mar  2 14:27 R
+
+

Sync from Wharf to Home directory

+
    +
  • Now sync this to your home directory:
  • +
+
[douglas@sens2017625-bianca douglas-sens2017625]$ rsync -Pa R ~/
+
+

Start an R session and load the new package

+

Because R_packages/4.2.1 was loaded when you installed/updated the packages in your personal R library, you need to have it loaded when you use these packages as well.

+

Simply change to the directory you want to work in, load the R_packages/4.2.1 module, and get to work.

+
[douglas@sens2017625-bianca douglas-sens2017625]$ cd /proj/sens2017625/nobackup/douglas/
+    [douglas@sens2017625-bianca douglas]$ module load R_packages/4.2.1
+
+

Then start R, and load the new package.

+
[douglas@sens2017625-bianca douglas]$ R
+
+
    > packageVersion('dowser')
+    [1] '1.2.0'
+    > library(dowser)
+    >
+
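
If you later want to use the updated package in a batch job on bianca, a minimal Slurm script could look like the sketch below; the project and time limit are only examples, so adjust them to your own project:

+
#!/bin/bash
+#SBATCH -A sens2017625
+#SBATCH -n 1
+#SBATCH -t 01:00:00
+
+module load R_packages/4.2.1
+Rscript -e 'library(dowser); print(packageVersion("dowser"))'
+
+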

Installing R packages on Bianca

+

R on UPPMAX course

+

What is a package, really?

+
    +
  • +

    An R package is essentially a self-contained folder and file structure containing R code (and possibly C/C++ or other code) and other files relevant for the package, e.g. documentation (vignettes), licensing and configuration files.

    +
  • +
  • +

    Let us look at a very simple example

    +
  • +
+
   $ git clone git@github.com:MatPiq/R_example.git
+
+   $ cd R_example
+
+   $ tree
+   .
+   ├── DESCRIPTION
+   ├── NAMESPACE
+   ├── R
+   │   └── hello.R
+   ├── man
+   │   └── hello.Rd
+   └── r_example.Rproj
+
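
The R/hello.R file in such a skeleton package typically contains a single small function. This is a sketch of what it might look like; the actual repository contents may differ:

+
hello <- function() {
+  print("Hello, world!")
+}
+
+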
+

Installing your own packages

+

Sometimes you will need R packages that are not already installed. The solution +to this is to install your own packages.

+
    +
  • +

    These packages will usually come from CRAN - the Comprehensive R Archive Network, or

    +
  • +
  • +

    sometimes from other places, like GitHub or R-Forge

    +
  • +
+

Here we will look at installing R packages with automatic download and with +manual download. It is also possible to install from inside RStudio.

+

Methods

+
    +
  • setup (first time)
  • +
  • automatic download and install from CRAN
  • +
  • automatic download and install from GitHub
  • +
  • manual download and install
  • +
+

setup (first time)

+

https://uppmax.github.io/bianca_workshops/extra/rpackages/#setup

+
    +
  • +

    We need to create a place for the own-installed packages to be and to tell R where to find them. The initial setup only needs to be done once, but separate package directories need to be created for each R version used.

    +
  • +
  • +

    R reads the $HOME/.Renviron file to set up its environment. It should be created by R on first run, or you can create it with the command: touch $HOME/.Renviron

    +
  • +
+

NOTE: In this example we are going to assume you have chosen to place the R packages in a directory under your home directory. As mentioned, you will need separate ones for each R version.

+

If you have not yet installed any packages to R yourself, the environment file +should be empty and you can update it like this:

+
    echo R_LIBS_USER=\"$HOME/R-packages-%V\" > ~/.Renviron  
+
+

If it is not empty, you can edit $HOME/.Renviron with your favorite editor so that R_LIBS_USER contains the path to your chosen directory for own-installed R packages. It should look something like this when you are done:

+
    R_LIBS_USER="/home/u/user/R-packages-%V"  
+
+

NOTE: Replace /home/u/user with the value of $HOME. Run echo $HOME to see its value.
NOTE: The %V should be written as-is; it is substituted at runtime with the active R version.

+

For each version of R you are using, create a directory matching the pattern +used in .Renviron to store your packages in. This example is shown for R +version 4.0.4:

+
    mkdir -p $HOME/R-packages-4.0.4  
+
+
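
If you use several R versions, you can create all of the package directories in one go. A small sketch, using version numbers mentioned in this documentation:

+
for v in 4.0.4 4.1.1 4.2.1 4.3.1; do
+    mkdir -p "$HOME/R-packages-$v"
+done
+
+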

Automatic download and install from CRAN

+

https://uppmax.github.io/bianca_workshops/extra/rpackages/#automatic-download-and-install-from-cran

+
+

Note

+

You find a list of packages in CRAN and a list of repos here: https://cran.r-project.org/mirrors.html

+
    +
  • Please choose a location close to you when picking a repo.
  • +
+
+
From the command line:
+
R --quiet --no-save --no-restore -e "install.packages('<r-package>', repos='<repo>')"
+
or from within an R session:
+
install.packages('<r-package>', repos='<repo>')
+

In either case, the dependencies of the package will be downloaded and installed as well.

+
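
For example, a concrete invocation could look like this; the package name glmnet is only an illustration, and the repo is the Swedish CRAN mirror used elsewhere on this page:

+
R --quiet --no-save --no-restore -e "install.packages('glmnet', repos='https://ftp.acc.umu.se/mirror/CRAN/')"
+
+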

Automatic download and install from GitHub

+

https://uppmax.github.io/bianca_workshops/extra/rpackages/#automatic-download-and-install-from-github

+

If you want to install a package that is not on CRAN but does have a GitHub page, there is an automatic way of installing it, but you need to handle the prerequisites yourself by installing those first.

+
    +
  • The package may also be in a less finished state than those on CRAN, so be careful.
  • +
+
+

Note

+

To install packages from GitHub directly, from inside R, you first need to install the devtools package. Note that you only need to install this once.

+
+

This is how you install a package from GitHub, inside R:

+
    install.packages("devtools")   # ONLY ONCE
+    devtools::install_github("DeveloperName/package")
+
+

Manual download and install

+

https://uppmax.github.io/bianca_workshops/extra/rpackages/#manual-download-and-install

+

If the package is not on CRAN, or you want the development version, or you for some other reason want to install a package you have downloaded, then this is how to install from the command line:

+
    R CMD INSTALL -l <path-to-your-R-package-directory> R-package.tar.gz
+
+

NOTE that if you install a package this way, you need to handle any dependencies yourself.

+
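
As a sketch, a manual download-and-install of a CRAN source package could look like this; the package name and version are illustrative, and the -l path assumes the setup described earlier on this page:

+
wget https://ftp.acc.umu.se/mirror/CRAN/src/contrib/dowser_1.2.0.tar.gz
+R CMD INSTALL -l $HOME/R-packages-4.2.1 dowser_1.2.0.tar.gz
+
+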
+

Note

+

Places to look for R packages

+ +
+

Example — Install Tidycmprsk

+

tidycmprsk on GitHub

+
+

Info

+

The tidycmprsk package provides an intuitive interface for working with the competing risk endpoints. The package wraps the cmprsk package, and exports functions for univariate cumulative incidence estimates with cuminc() and competing risk regression with crr().

+
+

Install on Rackham

+

You can install this for yourself by beginning on rackham. Do

+
module load R_packages/4.1.1
+
+

and then, within R, do

+
install.packages('tidycmprsk')
+
+

You will see two questions to answer yes to:

+
Warning in install.packages("tidycmprsk") :
+      'lib = "/sw/apps/R_packages/4.1.1/rackham"' is not writable
+    Would you like to use a personal library instead? (yes/No/cancel) yes
+
+

and

+
Would you like to create a personal library
+    '~/R/x86_64-pc-linux-gnu-library/4.1'
+    to install packages into? (yes/No/cancel) yes
+
+

This will then lead to an extended installation process that also performs some updates. It creates a directory ~/R that contains the installations and updates of R packages.

+

Transfer to the Wharf

+

After installation, the next step is to copy the contents of this directory over to bianca so that it is the same directory within your bianca home directory.

+

Make sure you are in your home directory. Then connect to the bianca wharf. Replace the name and project with your bianca user name and project.

+
sftp douglas-sens2017625@bianca-sftp
+
+

You log in here like you log into bianca: the first password is your password followed by the 6-digit authenticator code, the second password (if required for you) is only your password.

+

Once sftp has connected, the contents of the current directory can be listed with

+
dir
+
+

It should look like this:

+
sftp> dir
+douglas-sens2017625
+
+

Now cd to this directory, which is your wharf directory within your project.

+
sftp> cd douglas-sens2017625/
+sftp> dir
+sftp>
+
+

If you have not uploaded anything to your wharf, this will be empty; otherwise, it may have a few things in it.

+

Now, upload your (whole) R directory here.

+
sftp> put -r R
+
+

This will take a while to upload all the files. When it has completed, quit.

+
sftp> quit
+
+
    +
  • Now, log into bianca using the shell, or using the web interface and start a terminal.
  • +
  • Once you have a bianca shell, change to your wharf directory within your project. Replace my user and project with yours.
  • +
+
cd /proj/sens2017625/nobackup/wharf/douglas/douglas-sens2017625
+
+

Within this directory should be your R directory.

+
[douglas@sens2017625-bianca douglas-sens2017625]$ ls -l
+total 1892
+drwxrwxr-x  3 douglas douglas    4096 Mar  2 14:27 R
+
+

Sync from Wharf to Home directory

+
    +
  • Now sync this to your home directory:
  • +
+
[douglas@sens2017625-bianca douglas-sens2017625]$ rsync -Pa R ~/
+
+

Start an R session and load the new package

+

To use R_packages/4.1.1 with these new installations/updates, change to the directory you want to work in and load the R_packages/4.1.1 module. Substitute your directory for my example directory.

+
[douglas@sens2017625-bianca douglas-sens2017625]$ cd /proj/sens2017625/nobackup/douglas/
+    [douglas@sens2017625-bianca douglas]$ module load R_packages/4.1.1
+
+

Then start R, and load the new package.

+
[douglas@sens2017625-bianca douglas]$ R
+
+
    R version 4.1.1 (2021-08-10) -- "Kick Things"
+    Copyright (C) 2021 The R Foundation for Statistical Computing
+    ....
+    Type 'demo()' for some demos, 'help()' for on-line help, or
+    'help.start()' for an HTML browser interface to help.
+    Type 'q()' to quit R.
+
+    > library(tidycmprsk)
+    >
+

File transfer to/from Rackham using FileZilla

+

There are multiple ways to transfer data to/from Rackham.

+

Here, we show how to transfer files using a graphical tool called FileZilla.

+

FileZilla connected to Rackham

+
+

FileZilla connected to Rackham

+
+

Procedure

+

FileZilla logo, from https://en.wikipedia.org/wiki/FileZilla#/media/File:FileZilla_logo.svg

+
+

The FileZilla logo

+
+
+Would you like a video? +

If you would like to see how to do file transfer to/from Rackham using FileZilla, watch the video here

+
+

FileZilla is a secure file transfer tool that works under Linux, Mac and Windows.

+

To transfer files to/from Rackham using FileZilla, do +the following steps:

+

1. Start FileZilla

+

Start FileZilla.

+

2. Start FileZilla's site manager

+

From the menu, select 'File | Site manager'

+
+Where is that? +

It is here:

+

The FileZilla 'File' menu contains the item 'Site manager'

+
+

The FileZilla 'File' menu contains the item 'Site manager'

+
+
+

3. Add a new site in FileZilla's site manager

+

In FileZilla's site manager, click 'New site'

+
+Where is that? +

It is here:

+

The FileZilla Site Manager

+
+

4. Setup the site

+

In FileZilla's site manager:

+
    +
  • create a name for the site, e.g. rackham.
  • +
  • for that site, use all defaults, except:
      +
    • Set protocol to 'SFTP - SSH File Transfer Protocol'
    • +
    • Set host to rackham.uppmax.uu.se
    • +
    • Set user to [username], e.g. sven
    • +
    +
  • +
+
+What does that look like? +

It looks similar to this:

+

FileZilla configured for Rackham

+
+

5. Connect to the site

+

Click 'Connect'.

+

6. Fill in your password

+

You will be asked for your password, hence +type [your password], e.g. VerySecret. +You can save the password.

+
+What does that look like? +

It looks similar to this:

+

FileZilla asks for a password

+
+

7. Ready to transfer files

+

Now you can transfer files between your local computer and Rackham.

+
+What does that look like? +

It looks like this:

+

FileZilla is connected to Rackham

+

Data transfer to/from Rackham using SCP

+

There are multiple ways to transfer files to or from Rackham.

+

Here it is described how to do file transfer to/from Rackham using SCP. SCP is an abbreviation of 'Secure Copy Protocol'; however, it is no longer considered secure, and is instead regarded as an outdated protocol. The program scp allows you to transfer files to/from Rackham using SCP, by copying them between your local computer and Rackham.

+

Procedure

+
+Prefer a video? +

See this procedure as a video at YouTube

+
+

1. Start a terminal on your local computer

+

Start a terminal on your local computer

+

2. Copy files using scp

+

In the terminal, copy files using scp to connect to Rackham:

+
scp [from] [to]
+
+

Where [from] is the file(s) you want to copy, and [to] is the destination. +This is quite a shorthand notation!

+

This is how you copy a file from your local computer to Rackham:

+
scp [local_filename] [username]@rackham.uppmax.uu.se:/home/[username]
+
+

where [local_filename] is the path to a local filename, +and [username] is your UPPMAX username, for example:

+
scp my_file.txt sven@rackham.uppmax.uu.se:/home/sven
+
+

To copy a file from Rackham to your local computer, do the command above in reverse order:

+
scp [username]@rackham.uppmax.uu.se:/home/[username]/[remote_filename] [local_folder]
+
+

where [remote_filename] is the path to a remote filename, +[username] is your UPPMAX username, +and [local_folder] is your local folder, for example:

+
scp sven@rackham.uppmax.uu.se:/home/sven/my_remote_file.txt /home/sven
+
+
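
To copy an entire folder instead of a single file, add the standard -r flag; the folder name here is just an example:

+
scp -r my_folder sven@rackham.uppmax.uu.se:/home/sven
+
+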

3. If asked, give your UPPMAX password

+

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys.


Data transfer to/from Rackham using SFTP

+

Data transfer to/from Rackham using SFTP is one of the ways to transfer files to/from Rackham.

+
+What are the other ways? +

Other ways to transfer data to/from Rackham are described here

+
+

One can transfer files to/from Rackham using SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using SFTP.

+

The process is described here:

+

Step 1. Start a terminal on your local computer

+

Start a terminal on your local computer.

+

Step 2. Run sftp to connect to Rackham

+

In the terminal, run sftp to connect to Rackham by doing:

+
sftp [username]@rackham.uppmax.uu.se
+
+

where [username] is your UPPMAX username, for example:

+
sftp sven@rackham.uppmax.uu.se
+
+

Step 3. If asked, give your UPPMAX password

+

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys.

+

Step 4. Upload/download files to/from Rackham

+

In sftp, upload/download files to/from Rackham.

+

Basic sftp commands can be found here; a few common ones are shown below.

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fcf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+
+    user(User)
+      user_local_files(Files on user computer):::file_node
+
+    subgraph sub_inside[SUNET]
+      subgraph sub_rackham_shared_env[Rackham]
+          login_node(login/calculation/interactive node):::calculation_node
+          files_in_rackham_home(Files in Rackham home folder):::file_node
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#fcc,color:#000,stroke:#fcc
+    style sub_rackham_shared_env fill:#ffc,color:#000,stroke:#ffc
+
+    user --> |logs in |login_node
+    user --> |uses| user_local_files
+
+    login_node --> |can use|files_in_rackham_home
+    %% user_local_files <--> |graphical tool|files_in_rackham_home
+    %% user_local_files <--> |SCP|files_in_rackham_home
+    user_local_files <==> |SFTP|files_in_rackham_home
+
+    %% Aligns nodes prettier
+    user_local_files ~~~ login_node
+
+

Overview of file transfer on Rackham. The purple nodes are about file transfer; the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

+

Data transfer to/from Rackham using Transit using SCP

+

One can use SCP to copy files between Rackham and Transit, +from either Rackham or Transit.

+

Both ways are shown step-by-step below.


Data transfer to/from Rackham using Transit using SCP from Rackham

+

One can transfer files to/from Rackham using the UPPMAX Transit server, using SCP. The program scp allows you to copy files between Rackham and Transit.

+

The process is:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Use the terminal to login to Rackham

+

Use a terminal to login to Rackham.

+
+Forgot how to login to Rackham? +

A step-by-step guide how to login to Rackham +can be found here.

+

Spoiler: ssh [username]@rackham.uppmax.uu.se

+
+

⛔ 3a. Run scp to copy files from Rackham to Transit

+

This is how you would copy a file from Rackham to Transit: +in the terminal, run scp to copy files from Rackham to Transit by doing:

+
scp [file_on_rackham] [username]@transit.uppmax.uu.se
+
+

where [file_on_rackham] is the name of a file on Rackham +and [username] is your UPPMAX username, for example:

+
scp my_rackham_file.txt [username]@transit.uppmax.uu.se
+
+

However, Transit is a service, not a file server. +The scp command will complete successfully, +yet the file will not be found on Transit.

+

3b. Run scp to copy files from Transit to Rackham

+

In the terminal, run scp to copy files from Transit to Rackham by doing:

+
scp [username]@transit.uppmax.uu.se:[file_on_transit] [path_on_rackham]
+
+

where [file_on_transit] is the name of a file on Transit, [username] is your UPPMAX username, and [path_on_rackham] is the target path on Rackham, for example:

+
scp sven@transit.uppmax.uu.se:my_transit_file.txt .
+
+

4. If asked, give your UPPMAX password

+

You can get rid of this prompt if you have set up SSH keys.


Data transfer to/from Rackham using Transit using SCP from Transit

+

One can use SCP to copy files between Rackham and Transit, +from either Rackham or Transit.

+

One can transfer files to/from Rackham using the UPPMAX Transit server, using SCP. The program scp allows you to copy files between Rackham and Transit.

+

The process is:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Use the terminal to login to Transit

+

Use a terminal to login to Transit.

+
+Forgot how to login to Transit? +

A step-by-step guide how to login to Transit +can be found here.

+

Spoiler: ssh [username]@transit.uppmax.uu.se

+
+

3a. Run scp to copy files from Rackham to Transit

+

In the terminal, run scp to copy files from Rackham to Transit by doing:

+
scp [username]@rackham.uppmax.uu.se:/home/[username]/[file_on_rackham] [path_on_transit]
+
+

where [file_on_rackham] is the name of a file on Rackham, +[username] is your UPPMAX username, +and [path_on_transit] is the target path on Transit, +for example:

+
scp sven@rackham.uppmax.uu.se:/home/sven/my_rackham_file.txt .
+
+

Where . means 'the directory where I am now on Transit'.

+

3b. ⛔ Run scp to push files from Rackham to Transit

+

This is how you would push a file from Rackham to Transit: in a terminal on Rackham, run scp by doing:

+
scp [file_on_rackham] [username]@transit.uppmax.uu.se
+
+

where [file_on_rackham] is the name of a file on Rackham and [username] is your UPPMAX username, for example:

+
scp my_local_rackham_file.txt [username]@transit.uppmax.uu.se
+
+

However, Transit is a service, not a file server. +The scp command will complete successfully, +yet the file will not be found on Transit.

+

4. If asked, give your UPPMAX password

+

You can get rid of this prompt if you have set up SSH keys.


Data transfer to/from Rackham using Transit and SFTP

+

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

+

One can use SFTP to copy files between Rackham and Transit, +from either Rackham or Transit.

+

Both ways are shown step-by-step below.

+ +

Basic sftp commands can be found here.

+

Overview

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fcf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+    classDef transit_node fill:#fff,color:#000,stroke:#fff
+
+    subgraph sub_inside[SUNET]
+      direction LR
+      user(User)
+      subgraph sub_transit_env[Transit]
+        transit_login(Transit login):::calculation_node
+        files_on_transit(Files posted to Transit):::transit_node
+      end
+      subgraph sub_rackham_shared_env[Rackham]
+          files_in_rackham_home(Files in Rackham home folder):::file_node
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#ccc,color:#000,stroke:#000
+    style sub_transit_env fill:#cfc,color:#000,stroke:#000
+    style sub_rackham_shared_env fill:#fcc,color:#000,stroke:#000
+
+    user --> |logs in |transit_login
+
+    transit_login --> |can use|files_on_transit
+    %% user_local_files <--> |graphical tool|files_in_rackham_home
+    %% user_local_files <--> |SCP|files_in_rackham_home
+    files_on_transit <==> |transfer|files_in_rackham_home
+
+

Overview of file transfer on Rackham. The purple nodes are about file transfer; the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

+

Data transfer to/from Rackham using Transit and SFTP from Rackham

+

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

+

One can transfer files to/from Rackham using the UPPMAX Transit server and SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using Transit.

+

The process is:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Use the terminal to login to Rackham

+

Use a terminal to login to Rackham.

+
+Forgot how to login to Rackham? +

A step-by-step guide how to login to Rackham can be found here.

+

Spoiler: ssh [username]@rackham.uppmax.uu.se

+
+

3. Run sftp to connect to Transit

+

In the terminal, run sftp to connect to Transit by doing:

+
sftp [username]@transit.uppmax.uu.se
+
+

where [username] is your UPPMAX username, for example:

+
sftp sven@transit.uppmax.uu.se
+
+

4. If asked, give your UPPMAX password

+

You can get rid of this prompt if you have set up SSH keys.

+

5. In sftp, upload/download files to/from Transit

+

Transit is a service, not a file server. This means that if you upload files to Transit using SFTP, they will remain there as long as the connection is active. These files need to be forwarded to more permanent storage.

+

Basic sftp commands can be found here.


Data transfer to/from Rackham using Transit and SFTP from Transit

+

Data transfer to/from Rackham using Transit is one of the ways to transfer files to/from Rackham.

+

One can transfer files to/from Rackham using the UPPMAX Transit server and SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Rackham using Transit.

+

The process is:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Use the terminal to login to Transit

+

Use a terminal to login to Transit

+
+Forgot how to login to Transit? +

A step-by-step guide how to login to Transit +can be found here.

+

Spoiler: ssh [username]@transit.uppmax.uu.se

+
+

3. Run sftp to connect to Rackham

+

In the terminal, run sftp to connect to Rackham by doing:

+
sftp [username]@rackham.uppmax.uu.se
+
+

where [username] is your UPPMAX username, for example:

+
sftp sven@rackham.uppmax.uu.se
+
+

4. If asked, give your UPPMAX password

+

You can get rid of this prompt if you have set up SSH keys.

+

5. In sftp, upload/download files to/from Rackham

+

Transit is a service, not a file server. This means that if you upload files to Transit using SFTP, they will remain there as long as the connection is active. These files need to be forwarded to more permanent storage.

+

Basic sftp commands can be found here.


File transfer to/from Rackham using WinSCP

+

There are multiple ways to transfer data to/from Rackham.

+

Here, we show how to transfer files using a graphical tool called WinSCP.

+

To transfer files to/from Rackham using WinSCP, do:

+
    +
  • Start WinSCP
  • +
  • Create a new site
  • +
  • For that site, use all defaults, except:
      +
    • Set file protocol to 'SFTP'
    • +
    • Set host name to rackham.uppmax.uu.se
    • +
    • Set user name to [username], e.g. sven
    • +
    +
  • +

Rclone

+

Rclone is a command-line program to manage files on cloud storage.

+

There is an Rclone module called rclone.

+

Finding an Rclone version

+
module spider rclone
+
+
+What is the output? +

Here is some example output:

+
---------------------------------------------------------------------------------------
+  rclone: rclone/1.56.2
+---------------------------------------------------------------------------------------
+
+    This module can be loaded directly: module load rclone/1.56.2
+
+    Help:
+      rclone - use rclone
+
+      Description
+
+      a command line program to manage files on cloud storage, supporting over 40 cloud sto
+rage products
+
+      Version 1.56.2
+
+      https://rclone.org
+
+      Run 'rclone config' to set up rclone for your own use.
+
+
+

Loading an Rclone module

+

Here the Rclone module for version 1.56.2 is loaded:

+
module load rclone/1.56.2
+
+
+What is the output? +

Here is some example output:

+
rclone/1.56.2 : run 'rclone config' to set up rclone for your own use.  'man rclone' is available for further documentation, and see https://rclone.org/ for more
+
+
+

Finding the Rclone config file

+

After having loaded an Rclone module, one can find the path to the Rclone config file by:

+
rclone config file
+
+
+What is the output? +

Here is some example output:

+
Configuration file doesn't exist, but rclone will use this path:
+/home/sven/.config/rclone/rclone.conf
+
+
+

Using the Rclone web interface

+

With SSH X forwarding enabled, one can +use rclone from a web interface:

+
rclone rcd --rc-web-gui
+
+

Warning: do not run this on the login node.

+
+What is the output? +

Here is some example output:

+
2024/04/02 08:31:59 ERROR : Error reading tag file at /home/sven/.cache/rclone/webgui/tag
+2024/04/02 08:31:59 NOTICE: A new release for gui (v2.0.5) is present at https://github.com/rclone/rclone-webui-react/releases/download/v2.0.5/currentbuild.zip
+2024/04/02 08:31:59 NOTICE: Downloading webgui binary. Please wait. [Size: 4763452, Path :  /home/sven/.cache/rclone/webgui/v2.0.5.zip]
+2024/04/02 08:32:00 NOTICE: Unzipping webgui binary
+2024/04/02 08:32:01 NOTICE: Serving Web GUI
+2024/04/02 08:32:01 NOTICE: Serving remote control on http://localhost:5572/
+
+
+

Connect to Swestore

+

Rclone is one of the recommended ways to connect to Swestore.

+ +
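
Once a remote has been configured with rclone config, day-to-day use goes through rclone subcommands. A minimal sketch, where the remote name swestore is hypothetical and depends on what you named it during configuration:

+
rclone lsd swestore:                    # list top-level directories on the remote
+rclone copy my_folder swestore:backup   # copy a local folder to the remote
+rclone ls swestore:backup               # list files under a remote path
+
+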
+URL invalid? +

When setting the URL to the correct https://webdav.swestore.se, +Rclone will flag this as an error:

+

Rclone flags an error

+
+

Rclone flags an error, that may be a false error

+
+

However, this may be a false error. To determine this: +click on 'Explorer' and explore Swestore.

+

An example Swestore folder structure

+
+

An example Swestore folder structure

+
+

If you see the Swestore +folder structure above, Rclone works fine.

+

RStudio

+

RStudio is an IDE specialized for the R programming language.

+
+What is an IDE? +

See the page on IDEs.

+
+

Using RStudio differs per UPPMAX cluster:

+ +

RStudio versions

+
+Which versions of RStudio are available? +

Use module spider Rstudio to see all versions:

+
[sven@r210 sven]$ module spider Rstudio
+
+----------------------------------------------------------------------------
+  RStudio:
+----------------------------------------------------------------------------
+     Versions:
+        RStudio/1.0.136
+        RStudio/1.0.143
+        RStudio/1.0.153
+        RStudio/1.1.423
+        RStudio/1.1.463
+        RStudio/1.4.1106
+        RStudio/2022.02.0-443
+        RStudio/2022.02.3-492
+        RStudio/2022.07.1-554
+        RStudio/2023.06.0-421
+        RStudio/2023.06.2-561
+        RStudio/2023.12.1-402 (may not always work)
+
+
+

Some links between version and official documentation:

+
RStudio module          RStudio Builds documentation
RStudio/2023.06.2-561   here
+

Troubleshooting

+

RStudio runs partially

+

RStudio runs partially:

+
    +
  • File content is displayed just fine
  • +
  • The R interpreter does not respond
  • +
  • The files pane at the bottom-right is loading forever
  • +
+

RStudio runs partially

+

In one case (see ticket for details), +the problem was caused by a process called -bash (yes, the first character +is a dash/minus). Killing it with kill -s 1 [PID] (for example, +kill -s 1 11723) and then restarting RStudio solved the +problem.

+

R encountered a fatal error

+

Full error message:

+
R encountered a fatal error. The session was terminated.
+
+

R encountered a fatal error. The session was terminated

+

This is because the home folder is full.

+

Check this by using uquota.

+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham3 ~]$ uquota
+Your project     Your File Area       Unit        Usage  Quota Limit  Over Quota
+---------------  -------------------  -------  --------  -----------  ----------
+home             /home/sven           GiB          24.7           32
+home             /home/sven           files       79180       300000
+naiss2024-22-49  /proj/worldpeace     GiB           5.1          128
+naiss2024-22-49  /proj/worldpeace     files       20276       100000
+
+
+

Candidates for files that are too big are these hidden files:

+
    +
  • .RData
  • +
  • .Renviron
  • +
  • .Rhistory
  • +
+

One can use ls --all to see all files, including hidden files:

+
ls --all
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham2 ~]$ ls --all
+.                      .gtkrc               .nextflow.log.8
+..                     .ICEauthority        .nextflow.log.9
+.allinea               .ipython             .nv
+.bash_history          .java                .oracle_jre_usage
+.bash_logout           .jupyter             .pki
+.bash_profile          .kde                 private
+.bashrc                .keras               .profile
+.bashrc.save           .lesshst             .python_history
+.beast                 lib                  .r
+bin                    .lmod.d              R
+.cache                 .local               .RData
+.conda                 .login               .Rhistory
+.config                .MathWorks           .rstudio-desktop
+.cshrc                 .matlab              .ssh
+.dbus                  .mozilla             .subversion
+DNABERT_2              my_little_turtle.py  ticket_297538
+.emacs                 .nextflow            users
+.esd_auth              .nextflow.log        .viminfo
+.gitconfig             .nextflow.log.1      .vscode-oss
+.git-credential-cache  .nextflow.log.2      .vscode-server
+glob                   .nextflow.log.3      .wget-hsts
+.gnupg                 .nextflow.log.4      .Xauthority
+.gracetimefile         .nextflow.log.5      .xfce4-session.verbose-log
+.gradle                .nextflow.log.6      .xfce4-session.verbose-log.last
+.gstreamer-0.10        .nextflow.log.7      .zshrc
+
+
+

You can delete these hidden files by:

+
rm .RData
+rm .Renviron
+rm .Rhistory
+
+
+For staff +

Full report can be found at RT ticket 298623

+

RStudio on Bianca

+

RStudio on Bianca

+

Introduction

+

RStudio is an IDE specialized for the R programming language.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show how to use RStudio on Bianca, +using Bianca's remote desktop environment.

+
+Forgot how to login to a remote desktop environment? +

See the 'Logging in to Bianca' page.

+

Spoiler: go to https://bianca.uppmax.uu.se/

+
+

As RStudio is a resource-heavy program, +it must be run on an interactive node.

+
+Forgot how to start an interactive node? +

See the 'Starting an interactive node' page.

+
+

Procedure to start RStudio

+

Below is a step-by-step procedure to start RStudio on Bianca.

+
+Prefer a video? +

This procedure is also demonstrated in this YouTube video.

+
+

1. Get within SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start the Bianca remote desktop environment

+
+Forgot how to start Bianca's remote desktop environment? +

See the 'Logging in to Bianca' page.

+
+

3. Start an interactive session

+

Within the Bianca remote desktop environment, start a terminal. +Within that terminal, +start an interactive node +with 2 cores:

+
+

Why two cores?

+

RStudio is a resource-heavy program. +Due to this, we recommend using at least two cores +for a more pleasant user experience.

+
+
interactive -A [project_number] -n 2 -t 8:00:00
+
+

Where [project_number] is your +UPPMAX project, for example:

+
interactive -A sens2016001 -n 2 -t 8:00:00
+
+
+What is my UPPMAX project number? +

An easy answer that is probably true:

+

The one you used to login, which is part of your prompt. +For example, in the prompt below, the project is sens2016001.

+
[sven@sens2016001-bianca sven]$
+
+
+
+

Do not start RStudio from the menus

+

You can start a version of RStudio from the menus. +However, this will not have access to loaded +modules.

+

Instead, load RStudio from the module system.

+
+

4. Load the modules needed

+

In the terminal of the interactive session, do:

+
module load R_packages/4.3.1 RStudio/2023.12.1-402
+
+
+Do all combinations of R_packages and RStudio work? +

No.

+

Not all combination of R_packages and RStudio work equally well, +but this one is known to work (as +it was used in this solved ticket).

+

There have been issues using RStudio/2023.06.2-561 together with R/4.3.1

+
+
+Shouldn't I load R first? +

No.

+

Loading R_packages will load the corresponding R module.

+
+
+What happens if I do not load R_packages? +

Then you will have RStudio running without any R packages installed

+
+

5. Start RStudio

+

With the modules loaded, start RStudio from the terminal (on the +interactive node):

+
rstudio
+
+

RStudio can be slow to startup, as R has thousands (!) of packages. +Additionally, at startup and if enabled, your saved RStudio workspace +(with potentially a lot of data!) is read.

+
+How does RStudio look on Bianca? +

RStudio when starting up:

+

RStudio when starting up

+

RStudio when started up:

+

RStudio when started up

+

RStudio in action:

+

RStudio in action

+

The RStudio debugger, at the error message level:

+

The RStudio debugger, at the error message level

+

The RStudio debugger, at the function-that-caused-the-error level:

+

The RStudio debugger, at the function-that-caused-the-error level

+

The RStudio debugger, at the program level:

+

The RStudio debugger, at the program level

+
+

Troubleshooting

+

RStudio freezes when I start it, where yesterday it still worked

+

Hypothesis: Your home folder is full

+

Your home folder is full. That explains why it still worked yesterday: on that day, your home folder was not full yet.

+

RStudio uses your home folder +to store the things it needs, so when it is full, it cannot do its +things.

+

To confirm, from a terminal do:

+
du -h -d 1 .
+
+

This will show how much space the folders in your home folder take:

+

Home folder of a user that had RStudio frozen

+

In this example, there is a folder called wharf_backup that is 4.5 gigabytes. Moving it to a project folder solved the problem:

+
mv wharf_backup/ /proj/nobackup/[your_project_folder] 
+
+

For example:

+
mv wharf_backup/ /proj/nobackup/sens2016001 
+

RStudio on Rackham

+

RStudio on Rackham

+

Introduction

+

RStudio is an IDE specialized for the R programming language.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show how to use RStudio on Rackham, +using Rackham's remote desktop environment.

+
+Forgot how to login to a remote desktop environment? +

See the 'Logging in to Rackham' page.

+

Spoiler: go to https://rackham.uppmax.uu.se/

+
+

As RStudio is a resource-heavy program, +it must be run on an interactive node.

+
+Forgot how to start an interactive node? +

See the 'Starting an interactive node' page.

+
+

Procedure to start RStudio

+

Below is a step-by-step procedure to start RStudio on Rackham.

+
+Prefer a video? +

This procedure is also demonstrated in this YouTube video.

+
+

1. Get within SUNET

+

This step is only needed when outside of Sweden.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start a Rackham remote desktop environment

+

This can be either:

+ +

3. Start an interactive session

+

Within the Rackham remote desktop environment, start a terminal. +Within that terminal, start an interactive session +with 2 cores:

+
interactive -A [naiss_project_id] -n 2 -t [duration]
+
+

Where:

+
    +
  • [naiss_project_id] is your UPPMAX project code
  • +
  • [duration] is the duration of the interactive session
  • +
+

Resulting in, for example:

+
interactive -A naiss2024-22-310 -n 2 -t 8:00:00
+
+
+

Why two cores?

+

RStudio is a resource-heavy program. +Due to this, we recommend using at least two cores +for a more pleasant user experience.

+
+
+What is an interactive node? +

See start an interactive session

+
+
+

Do not start RStudio from the menus

+

You can start a version of RStudio from the menus. +However, this will not have access to loaded modules.

+

Instead, load RStudio from the module system.

+
+

4. Load the modules needed

+

In the terminal of the interactive session, do:

+
module load R/4.3.1 R_packages/4.3.1 RStudio/2023.12.1-402
+
+
+What does that look like? +

Your output will be similar to:

+
+[sven@r210 sven]$ module load R/4.3.1 R_packages/4.3.1 RStudio/2023.12.1-402
+R/4.3.1: Nearly all CRAN and BioConductor packages are installed and available by loading
+the module R_packages/4.3.1 
+R_packages/4.3.1: Note that loading some spatial analysis packages, especially geo-related packages, might
+R_packages/4.3.1: require you to load additional modules prior to use. monocle3 is such a package. See
+R_packages/4.3.1: 'module help R_packages/4.3.1'
+
+R_packages/4.3.1: The RStudio packages pane is disabled when loading this module, due to RStudio slowdowns
+R_packages/4.3.1: because there are >20000 available packages. *All packages are still available.*  For 
+R_packages/4.3.1: more information and instructions to re-enable the packages pane (not recommended) see
+R_packages/4.3.1: 'module help R_packages/4.3.1'
+
+RStudio/2023.12.1-402: Sandboxing is not enabled for RStudio at UPPMAX. See 'module help RStudio/2023.12.1-402' for more information
+
+
+
+What happens if I do not load R or R_packages? +

Then you will have the system-wide R version 3.6.0 without any packages installed.

+
+
+What does 'Sandboxing is not enabled for RStudio at UPPMAX' mean? +

Nothing.

+

Here is how it looks like:

+
[sven@r482 sven]$ module load RStudio/2023.06.2-561
+RStudio/2023.06.2-561: Sandboxing is not enabled for RStudio at UPPMAX. See 'module help RStudio/2023.06.2-561' for more information
+[sven@r482 sven]$ module help RStudio/2023.06.2-561
+
+--------------------------------------------------- Module Specific Help for "RStudio/2023.06.2-561" ----------------------------------------------------
+ RStudio - use RStudio 2023.06.2-561
+
+ Version 2023.06.2-561
+
+With the Linux distribution used on most UPPMAX clusters (CentOS 7), RStudio/2023.06.2-561
+prefers to use a 'suid sandbox'. We do not enable this at UPPMAX. Instead, we disable sandboxing
+during startup of RStudio by defining a shell alias for the 'rstudio' command. You may notice
+additional errors in the terminal window from which you ran the 'rstudio' command. This is
+expected and does not affect RStudio operation.
+
+For performance reasons, UPPMAX disables checks for updates.
+
+UPPMAX also disables the 'Packages' pane of RStudio if an R_packages module is loaded.
+
+
+

5. Start RStudio

+

With the modules loaded, start RStudio from the terminal (on the +interactive node):

+
rstudio
+
+

RStudio can be slow to startup, as R has thousands (!) of packages. +Additionally, at startup and if enabled, your saved RStudio workspace +(with potentially a lot of data!) is read.

+
+How does RStudio look on Rackham? +

RStudio when starting up:

+

RStudio when starting up

+

RStudio when started up:

+

RStudio when started up

+

RStudio when ready:

+

RStudio when started up

+

RStudio in action:

+

RStudio in action

+

The RStudio debugger, at the error message level:

+

The RStudio debugger, at the error message level

+

The RStudio debugger, at the function-that-caused-the-error level:

+

The RStudio debugger, at the function-that-caused-the-error level

+

The RStudio debugger, at the program level:

+

The RStudio debugger, at the program level

+

rsync

+

rsync is a command-line tool for file transfer, +with the goal of ensuring integrity of the data, +as well as a minimal amount of data transfer.

+

rsync can be used for copying, but also for synchronizing files, which is ideal for making a backup. On this page, we use the word 'copy', although rsync by default does a one-way synchronization: if the data is already there, it will do nothing.

+ +
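
Before a large transfer, it can be useful to preview what rsync would do without actually copying anything. A minimal sketch using the standard --dry-run flag:

+
rsync --recursive --dry-run --verbose my_folder sven@rackham.uppmax.uu.se:/home/sven/
+
+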

Installing rsync

+

To install rsync, see the official rsync download page.

+
+Tip for Ubuntu users +

Use apt as usual:

+
sudo apt install rsync
+
+
+
+Tip for Windows users +

When looking to download an executable of rsync, +look for the words 'binary' (all executables are binary) +and Cygwin (the environment in which the rsync executable +was built on Windows).

+
+

Copy a folder from local to Rackham

+

Copy a folder from a local computer to a Rackham home folder.

+

On your local computer, do:

+
rsync --recursive [folder_name] [user_name]@rackham.uppmax.uu.se:/home/[user_name]/
+
+

For example:

+
rsync --recursive my_folder sven@rackham.uppmax.uu.se:/home/sven/
+
+

The --recursive flag is used to +copy a folder and all of its subfolders.

+
+Want to preserve timestamps? +

To preserve the files' timestamps, add the --archive flag (which also implies --recursive), e.g.

+
rsync --recursive --archive my_folder sven@rackham.uppmax.uu.se:/home/sven/
+
+
+

Copy a folder from Rackham to local

+

Copy a folder from Rackham +to your local computer.

+

On your local computer, do:

+
rsync --recursive [user_name]@rackham.uppmax.uu.se:/home/[user_name]/[folder_name] [local_folder_destination]
+
+

For example:

+
rsync --recursive sven@rackham.uppmax.uu.se:/home/sven/my_folder .
+
+

Where . means 'the folder where I am now'.

+
+Want to preserve timestamps? +

To preserve the files' timestamps, add the --archive flag (which also implies --recursive), e.g.

+
rsync --recursive --archive sven@rackham.uppmax.uu.se:/home/sven/my_folder .
+
+
\ No newline at end of file
diff --git a/software/rsync_on_bianca/index.html b/software/rsync_on_bianca/index.html
new file mode 100644
index 000000000..12064d603
--- /dev/null
+++ b/software/rsync_on_bianca/index.html
@@ -0,0 +1,3163 @@
+ rsync on Bianca - UPPMAX Documentation

rsync on Bianca

+

rsync is a command-line tool +for file transfer.

+

This page describes how to use rsync on Bianca.

+

Using rsync for direct file transfer +from a local computer to the wharf fails, +as one cannot rsync directly to the wharf.

+

It can be made to work (by using transit), as described on +the UPPMAX page on Bianca file transfer using rsync.

+
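As a rough sketch of that working route (the authoritative steps are on the linked page; the mount_wharf command and the target folder below are assumptions based on it):

+
# on transit.uppmax.uu.se: mount the wharf of your project (assumed command) ...
mount_wharf sens2016001
+# ... then rsync into the mounted folder, e.g. pulling data from Rackham
+rsync --recursive sven@rackham.uppmax.uu.se:/home/sven/my_folder ~/sens2016001/
+
+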
+What does it look like if I try to rsync directly to the wharf anyway? +

One cannot rsync directly to the wharf.

+

However, this is how it looks:

+
sven@sven-N141CU:~$ rsync my_local_file.txt sven-sens2016001@bianca-sftp.uppmax.uu.se:/sven-sens2016001
+
+Hi!
+
+You are connected to the bianca wharf (sftp service) at
+bianca-sftp.uppmax.uu.se.
+
+Note that we only support SFTP, which is not exactly the
+same as SSH (rsync and scp will not work).
+
+Please see our homepage and the Bianca User Guide
+for more information:
+
+https://www.uppmax.uu.se/support/user-guides/bianca-user-guide/
+
+If you have any questions not covered by the User Guide, you are
+welcome to contact us at support@uppmax.uu.se.
+
+Best regards,
+UPPMAX
+
+sven-sens2016001@bianca-sftp.uppmax.uu.se's password:
+protocol version mismatch -- is your shell clean?
+(see the rsync manpage for an explanation)
+rsync error: protocol incompatibility (code 2) at compat.c(622) [sender=3.2.7]
+
+
+

If you want to do file transfer to/from Bianca, +read the UPPMAX page on Bianca file transfer using rsync.

\ No newline at end of file
diff --git a/software/rsync_on_rackham/index.html b/software/rsync_on_rackham/index.html
new file mode 100644
index 000000000..cc2e756d0
--- /dev/null
+++ b/software/rsync_on_rackham/index.html
@@ -0,0 +1,3172 @@
+ rsync on Rackham - UPPMAX Documentation

rsync on Rackham

+

rsync is a command-line tool for file transfer.

+

This page describes how to use rsync on Rackham.

+

Copy a folder from local to Rackham

+
flowchart LR
+  local_computer[Your local computer. Run rsync from here]
+  rackham[Rackham]
+
+  local_computer --> |rsync| rackham
+

Copy a folder from a local computer to a Rackham home folder.

+

On your local computer, do:

+
rsync --recursive [folder_name] [user_name]@rackham.uppmax.uu.se:/home/[user_name]/
+
+

For example:

+
rsync --recursive my_folder sven@rackham.uppmax.uu.se:/home/sven/
+
+

The --recursive flag is used to +copy a folder and all of its subfolders.

+

Copy a folder from Rackham to local

+
flowchart LR
+  local_computer[Your local computer. Run rsync from here]
+  rackham[Rackham]
+
+  rackham --> |rsync| local_computer
+

Copy a folder from Rackham +to your local computer.

+

On your local computer, do:

+
rsync --recursive [user_name]@rackham.uppmax.uu.se:/home/[user_name]/[folder_name] [local_folder_destination]
+
+

For example:

+
rsync --recursive sven@rackham.uppmax.uu.se:/home/sven/my_folder .
+
+

Where . means 'the folder where I am now'.

\ No newline at end of file
diff --git a/software/sbatch/index.html b/software/sbatch/index.html
new file mode 100644
index 000000000..67d376703
--- /dev/null
+++ b/software/sbatch/index.html
@@ -0,0 +1,3238 @@
+ sbatch - UPPMAX Documentation

sbatch

+

The job scheduler consists of many +programs to manage jobs. +sbatch is the program to submit a job to the scheduler.

+
flowchart TD
+  sbatch[sbatch: submit a job]
+  scancel[scancel: cancel a running job]
+  squeue[squeue: view the job queue]
+  sbatch --> |Oops| scancel
+  sbatch --> |Verify| squeue
+

After submitting a job, one can use squeue to +verify the job is in the job queue. If there is an error in the sbatch +command, one can cancel a job using scancel.

+

Minimal examples

+

There are two ways to demonstrate minimal use of sbatch:

+ +

These minimal examples use a short, default run-time.

+

with command-line Slurm arguments

+

To let Slurm schedule a job, one uses sbatch.

+

For Bianca and Rackham, one uses sbatch like this:

+
sbatch -A [project_code] [script_filename]
+
+

For Snowy, one uses sbatch like this:

+
sbatch -M snowy -A [project_code] [script_filename]
+
+

Where:

+
    +
  • -A [project_code]: the project to use, + for example sens2017625
  • +
  • [script_filename]: the name of a file that is a bash script, + for example, my_script.sh
  • +
  • -M snowy: if you use the Snowy computational resources
  • +
+

Filling this all in, for Bianca and Rackham:

+
sbatch -A sens2017625 my_script.sh
+
+

Filling this all in, for Snowy:

+
sbatch -M snowy -A sens2017625 my_script.sh
+
+
+What is my project? +

See the UPPMAX documentation on projects.

+
+
+How do I convert my project name to the project code I need to use here? +

See the UPPMAX documentation on projects.

+
+
+What is in the script file? +

The script file my_script.sh is a minimal example script. +Such a script could be:

+
#!/bin/bash
+echo "Hello"
+
+
+

with Slurm parameters in the script

+

The minimal command to use sbatch with Slurm parameters in the script:

+
sbatch [script_filename]
+
+

where [script_filename] is the name of a bash script, for example:

+
sbatch my_script.sh
+
+

For Bianca and Rackham, the script must contain at least the following line:

+
#SBATCH -A [project_code]
+
+

For Snowy, the script must contain at least the following lines:

+
#SBATCH -A [project_code]
+#SBATCH -M snowy
+
+

With:

+
    +
  • [project_code]: the project code, for example uppmax2023-2-25
  • +
+
+What is in the script file, for Bianca and Rackham? +

A full example script would be:

+
#!/bin/bash
+#SBATCH -A uppmax2023-2-25
+echo "Hello"
+
+
+
+What is in the script file, for Snowy? +

A full example script would be:

+
#!/bin/bash
+#SBATCH -A uppmax2023-2-25
+#SBATCH -M snowy
+echo "Hello"
+
+
+

More parameters

+

See the Slurm documentation on sbatch

+
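As a sketch of how a few of the most common ones combine in a job script (all are standard Slurm flags; the values are examples only):

+
#!/bin/bash
+#SBATCH -A uppmax2023-2-25   # project code
+#SBATCH -p core              # partition: 'core' for jobs smaller than a node
+#SBATCH -n 2                 # number of cores
+#SBATCH -t 01:00:00          # maximum run-time, here one hour
+#SBATCH -J my_job            # job name shown by squeue
+echo "Hello"
+
+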

Troubleshooting

+

See Slurm troubleshooting

\ No newline at end of file
diff --git a/software/scancel/index.html b/software/scancel/index.html
new file mode 100644
index 000000000..0a55e48b2
--- /dev/null
+++ b/software/scancel/index.html
@@ -0,0 +1,3119 @@
+ scancel - UPPMAX Documentation

scancel

+

The job scheduler consists of many +programs to manage jobs. +scancel is a tool to cancel jobs that are in the job queue or are running.

+

Usage:

+
scancel [job_number]
+
+

Where [job_number] is the number of the job. +You can see the job number when submitting a job using sbatch, +and you can find it in the job queue (when running squeue).

+

For example:

+
[sven@rackham3 ~]$ sbatch -A my_project my_script.sh 
+Submitted batch job 49311056
+[sven@rackham3 ~]$ scancel 49311056
+[sven@rackham3 ~]$ 
+
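scancel can also select jobs by owner instead of by job number, using the standard -u (user) and -t (state) flags. For example:

+
scancel -u sven              # cancel all jobs owned by user 'sven'
+scancel -u sven -t PENDING   # cancel only the jobs still waiting in the queue
+
+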
\ No newline at end of file
diff --git a/software/screen/index.html b/software/screen/index.html
new file mode 100644
index 000000000..eaa475641
--- /dev/null
+++ b/software/screen/index.html
@@ -0,0 +1,3256 @@
+ Screen - UPPMAX Documentation

Running a detachable screen process in a job

+

When you run the interactive command, you get a command prompt in the screen program.

+
+

Warning

+

When running the screen program in other environments, you can detach from your screen and later reattach to it. Within the environment of the interactive command, you lose this ability: your job is terminated when you detach. (This is a design decision and not a bug.)

+
+

In case you want the best of both worlds, i.e. to be able to detach and reattach to your screen program within a job, you need to start a job in some other way and start your screen session from a separate ssh login. Here is an example of how you can do this:

+
$ salloc -A project_ID -t 15:00  -n 1 --qos=short --bell --no-shell
+salloc: Pending job allocation 46964140
+salloc: job 46964140 queued and waiting for resources
+salloc: job 46964140 has been allocated resources
+salloc: Granted job allocation 46964140
+salloc: Waiting for resource configuration
+salloc: Nodes r174 are ready for job
+
+

Check the queue manager for the allocated node. In the example below, one core was allocated on the r174 compute node.

+
$ squeue -j 46964140
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+          46964140      core no-shell     user  R       0:44      1 r174
+
+

You can start an xterm terminal within this allocation like this:

+
xterm -e ssh -AX r174 &
+
+

The salloc command gives you a job allocation of one core for 15 minutes (the "--no-shell" option is important here). Alternatively, you can log in to any node of any of your running jobs, e.g. jobs started with the sbatch command.

+

You get a job number and from that you can find out the node name, in this example r174.

+

When you log in to the node with the ssh command, start the screen program:

+
screen
+
+

When you detach from the screen program, e.g. with the detach command (Ctrl-a d), you can later reattach to your screen session, in the same ssh session or in another one:

+
screen -r
+
+

When your job has terminated, you can neither reattach to your screen session nor log in to the node.

+

The screen session of the interactive command is integrated into your job, so e.g. all environment variables for the job are correctly assigned. For a separate ssh session, as in this example, that is not the case.

+

Please note that it is the job allocation that determines your core hour usage and not your ssh or screen sessions.

+

Tips

+
    +
  • +

    Start a new screen session with a command:

    +
    screen -dm your_command
    +
    +

    This will start a new screen session, run the command, and then detach from the session.

    +
  • +
  • +

    If you want to run multiple commands, you can do so like this:

    +
    screen -dm bash -c "command1; command2"
    +
    +

    This will run command1 and command2 in order.

    +
  • +
  • +

    To reattach to the screen session, use:

    +
    screen -r
    +
    +

If you have multiple sessions, you'll need to specify the session ID (see the sketch after this list).

    +
  • +
  • +

    To list your current screen sessions, use:

    +
    screen -ls
    +
    +
  • +
+
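A minimal sketch of reattaching to one of several sessions (the session ID below is an example):

+
screen -ls                   # lists sessions, e.g. '12345.pts-0.r174 (Detached)'
+screen -r 12345              # reattach to the session with that ID
+
+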

Please note that when a program terminates, screen (by default) kills the window that contained it. If you don't want your session to get killed after the script is finished, add exec sh at the end. For example:

+
screen -dm bash -c 'your_command; exec sh'
+
+

This will keep the screen session alive after your_command has finished executing.

+

YouTube : How to use GNU SCREEN - the Terminal Multiplexer

\ No newline at end of file
diff --git a/software/sftp/index.html b/software/sftp/index.html
new file mode 100644
index 000000000..47f4786cc
--- /dev/null
+++ b/software/sftp/index.html
@@ -0,0 +1,3428 @@
+ SFTP - UPPMAX Documentation

sftp

+

sftp is a tool to transfer data.

+

1. Getting Help

+

Once you are at the sftp prompt, check the available commands by +typing ? or help at the command prompt. +This will print a list of the available commands and +give a short description of them. We'll cover the most common ones in this guide.

+
sftp> ?
+Available commands:
+cd path                       Change remote directory to 'path'
+...
+...
+...
+
+

2. Check Present Working Directory

+

The command lpwd is used to check the local present working directory, +whereas the pwd command is used to check the remote working directory.

+
sftp> lpwd
+Local working directory: /
+sftp> pwd
+Remote working directory: /tecmint/
+lpwd  print the current directory on your system
+pwd  print the current directory on the ftp server
+
+

3. Listing Files

+

Listing files and directories on the local as well as the remote system.

+

On Remote

+
sftp> ls
+
+

On Local

+
sftp> lls
+
+

4. Upload File

+

Put a single file on the remote system.

+
sftp> put local.profile
+
+

Uploading local.profile to /tecmint/local.profile

+

5. Upload Multiple Files

+

Put multiple files on the remote system.

+
sftp> mput *.xls
+
+

Another alternative to uploading many files is to tar and/or compress the files into a single file before uploading. The file transfer will stop in between every file, so the more files you have to upload, the more stops it will make. This can have a dramatic impact on transfer speed if there are thousands of files to transfer. Running tar and/or zip on the files before transferring them will package all the files into a single file, so there will be no stops at all during the transfer, as in the sketch below.

+
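For example, one could package the files on the local computer first and then upload the single archive (file names are examples only):

+
# on your local computer, before starting sftp:
tar -czf reports.tar.gz *.xls
+# then, at the sftp prompt, upload one file instead of thousands:
+# sftp> put reports.tar.gz
+
+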

6. Download Files

+

Get a single file to the local system.

+
sftp> get SettlementReport_1-10th.xls
+
+

Fetching /tecmint/SettlementReport_1-10th.xls to SettlementReport_1-10th.xls. To get multiple files to the local system:

+
sftp> mget *.xls
+
+

Note: As we can see, by default the get command downloads the file to the local system under the same name. We can download a remote file and store it under a different name by specifying the new name at the end, as in the example below. (This applies only when downloading a single file.)

+
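For example (the local file name is an example):

+
sftp> get SettlementReport_1-10th.xls settlement_report.xls
+
+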

7. Switching Directories

+

Switching from one directory to another at the local and remote locations.

+

On Remote

+
sftp> cd test
+
+

On Local

+
sftp> lcd Documents
+
+

8. Create Directories

+

Creating new directories at the remote and local locations.

+
sftp> mkdir test
+sftp> lmkdir Documents
+
+

9. Remove Directory or File

+

Remove a directory or file on the remote system.

+
sftp> rm Report.xls
+sftp> rmdir sub1
+
+

Note: To remove/delete a directory from the remote location, the directory must be empty.

+

10. Exit the sftp Shell

+

The ! (exclamation mark) command drops us into a local shell, where we can execute Linux commands. Type the exit command to return to the sftp> prompt.

+
sftp> !
+[root@sftp ~]# exit
+Shell exited with status 1
+sftp>
+
\ No newline at end of file
diff --git a/software/sinfo/index.html b/software/sinfo/index.html
new file mode 100644
index 000000000..11cdeb755
--- /dev/null
+++ b/software/sinfo/index.html
@@ -0,0 +1,3166 @@
+ sinfo - UPPMAX Documentation

sinfo

+

sinfo is a tool to view information about Slurm nodes and partitions.

+
+What does that look like on Bianca? +
[sven@sens2016001-bianca ~]$ sinfo
+PARTITION AVAIL  TIMELIMIT  NODES  STATE NODELIST
+all        down 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]
+all        down 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]
+all        down 10-00:00:0      1   idle sens2016001-b9
+node         up 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]
+node         up 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]
+node         up 10-00:00:0      1   idle sens2016001-b9
+core*        up 10-00:00:0    204 drain* sens2016001-b[1-8,10-204,1178]
+core*        up 10-00:00:0     89   unk* sens2016001-b[205-210,301-312,1073-1084,1119-1177]
+core*        up 10-00:00:0      1   idle sens2016001-b9
+devel        up    1:00:00    192 drain* sens2016001-b[10-200,1178]
+devel        up    1:00:00     71   unk* sens2016001-b[1073-1084,1119-1177]
+devel        up    1:00:00      1   idle sens2016001-b9
+devcore      up    1:00:00    192 drain* sens2016001-b[10-200,1178]
+devcore      up    1:00:00     71   unk* sens2016001-b[1073-1084,1119-1177]
+devcore      up    1:00:00      1   idle sens2016001-b9
+
+

Although it may seem unexpected that only 1 node is idle, +this is the expected behavior for a virtual cluster: +most physical nodes are not allocated to this project and are hence unavailable.

+
+
+What does that look like on Rackham? +
[sven@rackham3 ~]$ sinfo
+PARTITION AVAIL  TIMELIMIT  NODES  STATE NODELIST
+all        down 10-00:00:0     22   comp r[2,36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]
+all        down 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]
+all        down 10-00:00:0     72 drain$ r[1001-1072]
+all        down 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]
+all        down 10-00:00:0     45  down* r[13,23,57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479,483-484,1189-1190,1199,1212,1240]
+all        down 10-00:00:0      8  drain r[29,35,78,154,212,226,335,485]
+all        down 10-00:00:0    115    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482,486,1180-1181,1203,1208,1210-1211,1217,1223,1227,1231,1235,1237,1239,1242-1246]
+all        down 10-00:00:0    317  alloc r[1,3,6,9,19,25-28,30,32-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,336-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478,1179,1182-1188,1191-1198,1200-1202,1204-1207,1209,1213-1216,1218-1222,1224-1226,1228-1230,1232-1234,1236,1238,1241,1247-1250]
+all        down 10-00:00:0     13   idle r[8,10-12,14-18,20-22,24]
+all        down 10-00:00:0     10   down r[4-5,7,31,61,118,144-145,219,278]
+core*        up 10-00:00:0     21   comp r[36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]
+core*        up 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]
+core*        up 10-00:00:0     72 drain$ r[1001-1072]
+core*        up 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]
+core*        up 10-00:00:0     41  down* r[57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479,1189-1190,1199,1212,1240]
+core*        up 10-00:00:0      5  drain r[35,78,154,212,226]
+core*        up 10-00:00:0    114    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482,1180-1181,1203,1208,1210-1211,1217,1223,1227,1231,1235,1237,1239,1242-1246]
+core*        up 10-00:00:0    301  alloc r[33-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,340,342-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478,1179,1182-1188,1191-1198,1200-1202,1204-1207,1209,1213-1216,1218-1222,1224-1226,1228-1230,1232-1234,1236,1238,1241,1247-1250]
+core*        up 10-00:00:0      6   down r[61,118,144-145,219,278]
+node         up 10-00:00:0     22   comp r[2,36,66,68,94,110,112,132,139,163,185,200,206,216,247,281,288,293,319,326,418,481]
+node         up 10-00:00:0     10   plnd r[49-50,58-60,63,283-285,287]
+node         up 10-00:00:0     18 drain* r[167,175,186,252,258,318,431,437-438,440,455-462]
+node         up 10-00:00:0     38  down* r[13,23,57,99,108-109,122,165,177-184,187,218,254,331,423,432-436,439,441,452,463-470,479]
+node         up 10-00:00:0      7  drain r[29,35,78,154,212,226,335]
+node         up 10-00:00:0     96    mix r[37-41,43,45-46,65,70-72,76-77,79,85,98,102,106,116,120,127-128,135-136,142,146,152-153,161,169,171-172,174,189,210-211,222,227,230-231,234,237,243,250,260,264,266,273,275-276,280,289,292,302,311,313-314,316-317,332-333,344,360-361,363-365,368,373,376,382,386-388,391,393-395,398,402-403,410,417,422,425,430,449,453,472-473,475-477,480,482]
+node         up 10-00:00:0    268  alloc r[1,3,6,9,19,25-28,30,32-34,42,44,47-48,51-56,62,64,67,69,73-75,80-84,86-93,95-97,100-101,103-105,107,111,113-115,117,119,121,123-126,129-131,133-134,137-138,140-141,143,147-151,155-160,162,164,166,168,170,173,176,188,190-199,201-205,207-209,213-215,217,220-221,223-225,228-229,232-233,235-236,238-242,244-246,248-249,251,253,255-257,259,261-263,265,267-272,274,277,279,282,286,290-291,294-301,303-310,312,315,320-325,327-330,334,336-343,345-359,362,366-367,369-372,374-375,377-381,383-385,389-390,392,396-397,399-401,404-409,411-416,419-421,424,426-429,442-448,450-451,454,471,474,478]
+node         up 10-00:00:0     13   idle r[8,10-12,14-18,20-22,24]
+node         up 10-00:00:0     10   down r[4-5,7,31,61,118,144-145,219,278]
+devel        up    1:00:00      2  down* r[483-484]
+devel        up    1:00:00      1  drain r485
+devel        up    1:00:00      1    mix r486
+devcore      up    1:00:00      2  down* r[483-484]
+devcore      up    1:00:00      1  drain r485
+devcore      up    1:00:00      1    mix r486
+
+
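To condense this long listing, the standard -s (--summarize) flag of sinfo prints one line per partition with aggregated node counts:

+
sinfo -s
+
+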
\ No newline at end of file
diff --git a/software/singularity/index.html b/software/singularity/index.html
new file mode 100644
index 000000000..333d8b796
--- /dev/null
+++ b/software/singularity/index.html
@@ -0,0 +1,3295 @@
+ Singularity/Apptainer - UPPMAX Documentation

Singularity User Guide

+

Singularity (www.sylabs.io/docs) provides tools for running containers that are more suitable for traditional HPC environments than some other tools such as Docker or LXC. These containers are portable and can be run both on your desktop machine and on our clusters.

+

One of the ways in which Singularity is more suitable for HPC is that it very actively restricts permissions so that you do not gain access to additional resources while inside the container. One consequence of this is that some common tools like ping or sudo do not work when run within a container (as a regular user).

+

Singularity is installed and can be used to run custom container images on the clusters Bianca and Rackham.

+

Pulling an existing Singularity image

+

It's possible to download and run pre-built images from the Singularity +hub https://singularity-hub.org +and the Singularity library (https://cloud.sylabs.io) +using the singularity pull subcommand, for example:

+
singularity pull library://ubuntu
+
+

This will download the requested image and place it in the current directory. +You can also upload an image you have built yourself and run it directly.

+

Creating a Singularity container

+

See creating a Singularity container for the multiple ways to build a Singularity container.

+

Examples

+ +

Running an existing image

+

Once you have an image, you can "run" it with a command such as

+
singularity run singularityhub-ubuntu-14.04.img
+
+

which will try to execute a "run" target in the container. +There are also the shell and exec subcommands for starting a shell +and running a specific command, respectively, as sketched below.

+
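A minimal sketch of both, using the image from above:

+
singularity shell singularityhub-ubuntu-14.04.img   # interactive shell in the container
+singularity exec singularityhub-ubuntu-14.04.img cat /etc/os-release   # run one command
+
+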

Access to UPPMAX file systems

+

By default, Singularity will try to map the UPPMAX file systems of the current cluster so that they can be accessed from within the container. For CentOS 7 based clusters (Snowy, Rackham, Bianca), this works as expected.

+
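If a path you need is not mapped automatically, the standard --bind flag can map it in explicitly (the project path and image name below are examples only):

+
singularity exec --bind /proj/my_project:/data my_image.img ls /data
+
+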

Singularity is installed on the system (on each separate node) and does not require any module load to be available.

+

It's possible to run Docker containers. You can try to run

+
singularity shell docker://debian:stretch
+
+

but note that Docker containers are typically designed to run with more privileges than are allowed with Singularity, so it's quite possible things do not work as expected.

+

Not all images may work everywhere

+

Images run with the same Linux kernel as the rest of the system. For HPC systems, the kernels used tend to be quite old for stability reasons. This is not normally a problem, but it can cause issues if the libraries of the images you try to run expect functionality added in newer kernels. How and what works is difficult to know without trying, but we have successfully started a shell in an image of the currently most recent Ubuntu release (17.04).

\ No newline at end of file
diff --git a/software/software-table/index.html b/software/software-table/index.html
new file mode 100644
index 000000000..0f473c64e
--- /dev/null
+++ b/software/software-table/index.html
@@ -0,0 +1,9598 @@
+ Software table - UPPMAX Documentation

Software

CategoryNameModuleClusterVersionsLicence
Alignmentblastblastrackham bianca miarka snowy2.14.1+, 2.15.0+Public Domain
Alignmentbowtie2bowtie2rackham bianca miarka snowy2.5.2GPL v3
Alignmentcactuscactusrackham bianca miarka snowy2.7.2, 2.8.2Copyright
AlignmentClipKITClipKITrackham bianca miarka snowy2.2.4MIT
AlignmentCrossMapCrossMaprackham bianca miarka snowy0.6.6GPL v2+
Alignmentdiamonddiamondrackham bianca miarka snowy2.0.6, 2.1.9GPL v3
Alignmentfastafastarackham bianca miarka snowy36.3.8iApache 2.0
Alignmenthmmerhmmerrackham bianca miarka snowy3.4custom open-source
Alignmentlastlastrackham bianca miarka snowy1505None
Alignmentlastzlastzrackham bianca miarka snowy1.04.22MIT
AlignmentmapADmapADrackham bianca miarka snowy0.42.1MIT
Alignmentmapcallermapcallerrackham bianca miarka snowy0.9.9.41MIT
AlignmentMashMapMashMaprackham bianca miarka snowy3.0.4Mixed
Alignmentminimap2minimap2rackham bianca miarka snowy2.26-r1175MIT
Alignmentminiprotminiprotrackham bianca miarka snowy0.12, 0.13MIT
AlignmentMMseqs2MMseqs2rackham bianca miarka snowy14-7e284, 15-6f452GPL v3
Alignmentngmlrngmlrrackham bianca miarka snowy0.2.7-20210816-a2a31fbMIT
Alignmentpbmm2pbmm2rackham bianca miarka snowy1.13.0BSD 3
Alignmentrandfoldrandfoldrackham bianca miarka snowy2.0.1None
AlignmentRMBlastRMBlastrackham bianca miarka snowy2.2.28, 2.14.1+Public domain
Alignmentskaniskanirackham bianca miarka snowy0.2.1MIT
AlignmentSNAP-alignerSNAP-alignerrackham bianca miarka snowy2.0.3Apache 2.0
Alignmentspalnspalnrackham bianca miarka snowy3.0.3GPL v2 and custom
Alignmentstarstarrackham bianca miarka snowy2.7.8a, 2.7.11aMIT
Alignmenttrftrfrackham bianca miarka snowy4.10.0-rc.2Gnu Afferoo GPL v3
AlignmentUS-alignUS-alignrackham bianca miarka snowy20230727-e5c6270Custom open source AS-IS
Alignmentwfmashwfmashrackham bianca miarka snowy0.12.5MIT
Annotationaf2complexaf2complexrackham bianca miarka snowy1.4.0None
Annotationalphapulldownalphapulldownrackham bianca miarka snowy1.0.4GPL-3.0
Annotationaugustusaugustusrackham bianca miarka snowy3.5.0, 3.5.0-20231223-33fc04dArtistic License 1.0
Annotationbali-phybali-phyrackham bianca miarka snowy4.0-beta15None
Annotationbcftools-scorebcftools-scorerackham bianca miarka snowy1.18-20231207-9c8e21eMIT
Annotationbrakerbrakerrackham bianca miarka snowy3.0.3, 3.0.7, 3.0.8Artistic 1.0
Annotationcolabfold-localcolabfold-localrackham bianca miarka snowy1.5.5MIT
Annotationcompleasmcompleasmrackham bianca miarka snowy0.2.2, 0.2.5, 0.2.6Apache 2.0 and others
AnnotationCookHLACookHLArackham irma bianca miarka snowy1.0.0None
AnnotationDeepLocDeepLocrackham bianca miarka snowy2.0None
Annotationdoradodoradorackham bianca miarka snowy0.6.1, 0.8.2PLC 1.0
Annotationduplex-toolsduplex-toolsrackham bianca miarka snowy0.3.3MPL 2.0
Annotationfunannotatefunannotaterackham bianca miarka snowy1.8.1, 1.8.17BSD-2
AnnotationGeneMarkGeneMarkrackham bianca miarka snowy4.69-es, 4.71-es, 4.72-esCustom
AnnotationGeneMark-ETPGeneMark-ETPrackham bianca miarka snowy1.02-20231213-dd8b37bCreative Commons Attribution NonCommercial ShareAlike 4.0 License
AnnotationGenomeThreaderGenomeThreaderrackham bianca miarka snowy1.7.4Custom AS IS
AnnotationHATCHetHATCHetrackham bianca miarka snowy2.0.1BSD-3
AnnotationHATKHATKrackham bianca miarka snowy2.0betaAS IS
Annotationhybpiperhybpiperrackham bianca miarka snowy2.1.6GPLv3
Annotationigv-reportsigv-reportsrackham bianca miarka snowy1.12.0-python3.9.5MIT
AnnotationInterProScanNoneNone5.65-97.0, 5.67-99.0, .zNone
AnnotationIsoQuantIsoQuantrackham bianca miarka snowy3.3.1Custom
Annotationjcvijcvirackham bianca miarka snowy1.3.8BSD2
AnnotationkmersGWASkmersGWASrackham irma bianca miarka snowy20221010-a706bb7None
AnnotationLDAKLDAKrackham bianca miarka snowy5.2Open source AS IS
AnnotationLOHHLALOHHLArackham bianca miarka snowy20210129-00744c5None
AnnotationLOHHLA-slagtermaartenLOHHLA-slagtermaartenrackham bianca miarka snowy20200219-b38c477None
Annotationmacsemacserackham bianca miarka snowy2.07None
AnnotationMakeHubMakeHubrackham bianca miarka snowy1.0.5-20200210-1ecd6bb, 1.0.8-20240217-31cc299GPL v3
Annotationmetaeukmetaeukrackham bianca miarka snowy6-a5d39d9GPL v3
AnnotationOBIToolsOBIToolsrackham bianca miarka snowy1.2.13CeCILL
AnnotationOBITools3OBITools3rackham bianca miarka snowy3.0.1b24CeCILL
Annotationomm-macseomm-macserackham bianca miarka snowy12.01None
AnnotationORFfinderORFfinderrackham irma bianca miarka snowy0.4.3Public domain
AnnotationOrthoFinderOrthoFinderrackham bianca miarka snowy2.5.2, 2.5.5GPL v3
Annotationpolysolverpolysolverrackham bianca miarka snowyv4None
AnnotationProtHintProtHintrackham bianca miarka snowy2.6.0-20231027-103304cGeneMark license
Annotationpullseqpullseqrackham bianca miarka snowy20230518-7381691None
AnnotationpVACtoolspVACtoolsrackham bianca miarka snowy3.1.1BSD3
AnnotationpycoQCpycoQCrackham bianca miarka snowy2.5.2GPL-3.0
AnnotationRepeatMaskerRepeatMaskerrackham bianca miarka snowy4.1.5None
Annotationseppsepprackham bianca miarka snowy4.3.10_python_3.7.2, 4.5.1, 4.5.2GPL v3
Annotationsequenza-utilssequenza-utilsrackham bianca miarka snowy3.0.0None
AnnotationSignalPSignalPrackham bianca miarka snowy6.0hNone
AnnotationsnpEffsnpEffrackham bianca miarka snowy5.2MIT
Annotationsopranosopranorackham bianca miarka snowy20240418-938604eGPL-3.0
Annotationsvtoolssvtoolsrackham bianca miarka snowy0.5.1MIT
Annotationtabixpptabixpprackham bianca miarka snowy1.1.2MIT
AnnotationTEspeXTEspeXrackham bianca miarka snowy2.0.1GPL v3
AnnotationTransDecoderTransDecoderrackham bianca miarka snowy5.7.1-20230913-8b926acNone
AnnotationTrEMOLOTrEMOLOrackham bianca miarka snowy2.2-beta1GPL v3
Annotationtrextrexrackham bianca miarka snowy20230904-df86afe, 20231120-d9c840aNone
AnnotationTSEBRATSEBRArackham bianca miarka snowy1.1.2.4Artistic License 2.0
Annotationvartrixvartrixrackham bianca miarka snowy1.1.22MIT
Annotationvcf2mafvcf2mafrackham bianca miarka snowy1.6.21Apache 2.0
AnnotationWhatsHapWhatsHaprackham bianca miarka snowy2.3-20240529-be88057MIT
Assemblyassembly-statsassembly-statsrackham bianca miarka snowy1.0.1-20211102-c006b9cGPL v3
AssemblyFlyeFlyerackham bianca miarka snowy2.9.5BSD-3-Clause
AssemblyGetOrganelleGetOrganellerackham bianca miarka snowy1.7.3.3, 1.7.7.0GPLv3
Assemblyhifiasmhifiasmrackham bianca miarka snowy0.16.1-r375, 0.19.8-r603, 0.20.0-r639MIT
Assemblyhifiasm-metahifiasm-metarackham bianca miarka snowy0.3.2-r74MIT
AssemblyIPAIPArackham irma bianca miarka snowy1.8.0BSD 3-clause
AssemblyL_RNA_scaffolderL_RNA_scaffolderrackham bianca miarka snowy20190530-98f19e3None
AssemblyMBGMBGrackham bianca miarka snowy1.0.14MIT
AssemblymetaMDBGmetaMDBGrackham bianca miarka snowy0.3-20240117-57f4493MIT
Assemblyminiasmminiasmrackham bianca miarka snowy0.3-r179-20191007-ce615d1MIT
AssemblyPolypolishPolypolishrackham bianca miarka snowy5.0GPL-3.0
Assemblypurge_dupspurge_dupsrackham bianca miarka snowy1.2.5, 1.2.6MIT
AssemblyRedundansRedundansrackham bianca miarka snowy2.0.1GPL v3
Assemblyshovillshovillrackham bianca miarka snowy1.0.0, 1.1.0GPL-3.0
AssemblySKESASKESArackham bianca miarka snowy2.4.0Public domain
Assemblyspadesspadesrackham bianca miarka snowy4.0.0GPL v2
AssemblyStringTieStringTierackham bianca miarka snowy2.2.1MIT
AssemblyTrycyclerTrycyclerrackham bianca miarka snowy0.5.4GPL-3.0
Assemblyvgvgrackham bianca miarka snowy1.29.0, 1.48.0mixed
Bioinformatics alignmentAGEAGEbianca irma milou rackham snowy0.4None
Bioinformatics alignmentGEM-ToolsGEM-Toolsrackham irma bianca snowy1.7.1None
Bioinformatics alignmentHISAT2HISAT2bianca irma rackham snowy2.0.1-beta, 2.0.5, 2.1.0, 2.2.1GPL v3
Bioinformatics alignmentinfernalinfernalbianca miarka milou rackham snowy1.0.2, 1.1.1, 1.1.2BSD
Bioinformatics alignmentKalignKalignrackham irma bianca snowy1.04, 2.04None
Bioinformatics alignmentLEON-BISLEON-BISbianca irma milou rackham snowy20130322None
Bioinformatics alignmentMafFilterMafFilterbianca irma milou rackham snowy1.1.2None
Bioinformatics alignmentMAFFTMAFFTbianca irma rackham snowy7.205, 7.245, 7.310, 7.407BSD (main), mixed open-source (extensions)
Bioinformatics alignmentMUMmerMUMmerrackham irma bianca snowy3.9.4alpha, 3.22, 3.23, 4.0.0beta2, 4.0.0rc1Artistic License 2.0
Bioinformatics alignmentunimapunimaprackham irma bianca snowy0.1-r46-dirtyMIT
Bioinformatics annotationAATAATbianca irma milou rackham snowyr03052011Custom "AS IS"
Bioinformatics annotationGEMININoneNone0.16.3, 0.18.3, 0.19.0, 0.20.0, 0.20.1, .gemini_0.18.3None
Bioinformatics annotationHaMStRNoneNone13.2.3, .HaMStR, .HaMStR-oldNone
Bioinformatics annotationInterProScanInterProScanbianca miarka rackham snowy5.52-86.0Misc
Bioinformatics annotationlibBigWiglibBigWigrackham irma bianca snowy0.4.4MIT
Bioinformatics annotationProtHintProtHintrackham miarka bianca snowy2.4.0GeneMark license https://github.com/gatech-genemark/ProtHint/blob/master/LICENSE
Bioinformatics annotationtmhmmtmhmmrackham miarka bianca snowy2.0cCustom as-is
Bioinformatics annotationVIBRANTVIBRANTrackham irma bianca snowy1.2.1GNU General Public License
Bioinformatics assemblyA5-miseqNoneNone20140113, 20140604, 20160825, .A5-miseqNone
Bioinformatics assemblyabyssabyssbianca irma milou rackham snowy1.3.5, 1.3.5-max, 1.3.7, 1.3.7-k128, 1.3.7-max, 1.5.2, 1.9.0, 1.9.0-k128, 2.0.2, 2.0.2-k128GPL v3
Bioinformatics assemblyallpathslgNonebianca irma rackham snowy47300, 49618, 52485, 52488None
Bioinformatics assemblyAMOSAMOSbianca irma milou rackham snowy3.0.0, 3.1.0Artistic
Bioinformatics assemblyARC_assemblerARC_assemblermilou1.1.3Apache 2.0
Bioinformatics assemblyARCSARCSrackham irma bianca snowy1.0.6, 1.1.1GPL v3
Bioinformatics assemblyBESSTNoneNone1.0.4.3, 1.0.4.4, .besstNone
Bioinformatics assemblyDBG2OLCDBG2OLCrackham snowy miarka bianca20151208None
Bioinformatics assemblyDISCOVARdenovoNonebianca miarka rackham snowy51885, 52488None
Bioinformatics assemblyFALCONFALCONrackham miarka bianca snowy0.3.0, 0.4.1, 2018.31.08-03.06Clear BSD
Bioinformatics assemblyFALCON-integrateFALCON-integratebianca miarka milou rackham snowy20161113Custom "as is"
Bioinformatics assemblyFlyeFlyerackham miarka bianca snowy2.3.5, 2.4.2, 2.8.1-d
Bioinformatics assemblyGAAGAArackham miarka bianca snowy1.1GPL v2+
Bioinformatics assemblyGARMGARMbianca irma milou rackham snowy0.7, 0.7.3None
Bioinformatics assemblyIDBANonebianca miarka rackham snowy1.1.1, 1.1.1-384, 1.1.3None
Bioinformatics assemblyLINKSLINKSrackham miarka bianca snowy1.8.7GPL v3
Bioinformatics assemblyMaSuRCAMaSuRCArackham miarka bianca snowy2.0.3.1, 2.1.0, 2.2.1, 2.3.2, 3.1.3, 3.2.1, 3.2.2, 3.2.3, 3.3.5, 3.4.2GPL v3
Bioinformatics assemblyMetAMOSMetAMOSbianca irma milou rackham snowy1.5rc3GPLv2 and other open source
Bioinformatics assemblyMetassemblerMetassemblermilou1.5open-source
Bioinformatics assemblyMHAPMHAPmilou1.6Apache 2.0
Bioinformatics assemblyPlatanus-alleePlatanus-alleebianca irma rackham snowy2.0.2GPL v3
Bioinformatics assemblyquickmergequickmergerackham irma bianca snowy0.3-9233726GPL v3
Bioinformatics assemblyUnicyclerUnicyclerbianca irma rackham snowy0.4.8GPL v3
Bioinformatics assemblywtdbg2wtdbg2rackham irma bianca snowy2.4GPL v3
Bioinformatics misc | AdapterRemoval | AdapterRemoval | bianca irma milou rackham snowy | 2.1.7, 2.2.2 | GPL v3
Bioinformatics misc | AdmixTools | AdmixTools | bianca irma milou rackham snowy | 5.0-20170312, 5.0-20171024, 7.0.1, 20160803 | Custom "as-is" open source
Bioinformatics misc | ADMIXTURE | ADMIXTURE | bianca irma milou rackham snowy | 1.3.0 | Not open source
Bioinformatics misc | AlienTrimmer | AlienTrimmer | bianca irma milou rackham snowy | 0.4.0 | GPL
Bioinformatics misc | ANGSD | ANGSD | rackham irma bianca snowy | 0.917, 0.917-g6522d3e, 0.921, 0.933 | None
Bioinformatics misc | Athlates | Athlates | bianca irma milou rackham snowy | 20140426 | Custom
Bioinformatics misc | ATLAS_aDNA | ATLAS_aDNA | bianca irma milou rackham snowy | 20170510 | None
Bioinformatics misc | ATSAS | ATSAS | bianca irma milou rackham snowy | 2.8.3-1 | Academic use only
Bioinformatics misc | bam2fastx | bam2fastx | rackham irma bianca snowy | 1.3.0-80dbf79 | BSD 3-clause
Bioinformatics misc | bambam | bambam | bianca irma rackham snowy | 1.4 | MIT
Bioinformatics misc | Bamsurgeon | Bamsurgeon | rackham irma bianca snowy | 1.3 | MIT License
Bioinformatics misc | bcftools | bcftools | bianca irma rackham snowy | 1.8, 1.10 | MIT/Expat or GPL v3
Bioinformatics misc | BEETL | BEETL | bianca irma milou rackham snowy | 1.0.2, 1.1.0 | BSD 2-Clause
Bioinformatics misc | bgen | bgen | rackham irma bianca snowy | 1.1.4 | Boost Software License v1.0
Bioinformatics misc | BioPerl | BioPerl | bianca miarka rackham snowy | 1.6.924_Perl5.18.4, 1.7.1_Perl5.24.1, 1.7.2_Perl5.24.1, 1.7.2_Perl5.26.2 | Perl
Bioinformatics misc | bonito | None | rackham irma bianca snowy | 0.3.7-cpu, 0.3.8, 0.4.0 | Oxford Nanopore Public License 1.0
Bioinformatics misc | BraCeR | BraCeR | rackham irma bianca snowy | 2019_10_03_22e49cb | Apache 2.0
Bioinformatics misc | cdbfasta | cdbfasta | bianca irma rackham snowy | 1.00 | Artistic 2.0
Bioinformatics misc | cellranger-ATAC | None | bianca irma rackham snowy | 1.2.0, 2.0.0 | None
Bioinformatics misc | CheckM | CheckM | bianca irma rackham snowy | 1.0.11, 1.0.12, 1.1.3 | GPL v3
Bioinformatics misc | CITE-seq-Count | CITE-seq-Count | rackham irma bianca snowy | 1.4.3 | MIT
Bioinformatics misc | CleaveLand4 | CleaveLand4 | bianca irma milou rackham snowy | 4.3 | GPL v3
Bioinformatics misc | CNV-seq | CNV-seq | milou | 20140812 | Misc open source
Bioinformatics misc | CNVnator | CNVnator | bianca irma milou rackham snowy | 0.3.2, 0.3.3 | Creative Commons Public License
Bioinformatics misc | CONCOCT | CONCOCT | rackham irma bianca snowy | 0.4.0, 0.4.0~, 0.5.0, 1.1.0 | FreeBSD
Bioinformatics misc | Corset | Corset | bianca irma milou rackham snowy | 1.04, 1.07 | GPL v3
Bioinformatics misc | DATES | DATES | rackham irma bianca snowy | 753 | None
Bioinformatics misc | DosageConvertor | DosageConvertor | rackham irma bianca snowy | 1.0.4 | custom
Bioinformatics misc | DWGSIM | DWGSIM | bianca irma milou rackham snowy | 0.1.11-6e9a361 | GPL v2
Bioinformatics misc | EnsEMBL-API | EnsEMBL-API | rackham irma bianca snowy | 87, 94 | Apache License 2.0
Bioinformatics misc | EPACTS | EPACTS | bianca miarka milou rackham snowy | 3.2.6, 3.2.6_milou, 3.3.0-a5209db | GPL v3
Bioinformatics misc | ExpansionHunter | None | bianca irma rackham snowy | 2.5.3, 2.5.5 | None
Bioinformatics misc | FastANI | FastANI | rackham irma bianca snowy | 1.2 | Apache 2.0
Bioinformatics misc | fastp | fastp | rackham irma bianca snowy | 0.20.0, 0.23.1 | MIT
Bioinformatics misc | fcGENE | fcGENE | rackham irma bianca snowy | 1.0.7 | GPL
Bioinformatics misc | Filtlong | Filtlong | rackham irma bianca snowy | 0.2.0 | GPL v3
Bioinformatics misc | FLASH | FLASH | bianca irma milou rackham snowy | 1.2.11 | GPL
Bioinformatics misc | FusionCatcher | None | rackham irma bianca snowy | 1.00, 1.10, 1.33 | GPLv3
Bioinformatics misc | GEM | GEM | bianca irma rackham snowy | 2.7, 3.4 | Research only
Bioinformatics misc | GEMMA | GEMMA | rackham irma bianca snowy | 0.98.1 | GPL3
Bioinformatics misc | GeneMark | GeneMark | rackham miarka bianca snowy | 2.3-es, 4.32-es, 4.33-es, 4.33-es_Perl5.24.1, 4.38-es, 4.57-es, 4.62-es, 4.68-es | Custom
Bioinformatics misc | Genepop | Genepop | rackham irma bianca snowy | 4.7 | CeCILL (GPL compatible)
Bioinformatics misc | GenomeTools | GenomeTools | rackham irma bianca snowy | 1.5.8, 1.5.9, 1.6.1 | ISC
Bioinformatics misc | GERP++ | GERP++ | bianca irma milou rackham snowy | 20110522 | GPL v3+
Bioinformatics misc | GTOOL | GTOOL | milou | 0.7.5 | "As is" open source
Bioinformatics misc | HiCUP | HiCUP | rackham irma bianca snowy | 0.7.2 | GPL v3 or later
Bioinformatics misc | htslib | htslib | bianca irma rackham snowy | 1.8, 1.10 | MIT/Expat and modified 3-clause BSD
Bioinformatics misc | IGV | None | bianca irma rackham snowy | 2.3.17, 2.3.40, 2.3.92, 2.4.2, 2.8.13 | None
Bioinformatics misc | IGVtools | IGVtools | bianca irma milou rackham snowy | 2.3.17, 2.3.40, 2.3.91, 2.3.98 | LGPL
Bioinformatics misc | IM | IM | bianca irma milou rackham snowy | 20091217 | None
Bioinformatics misc | IMa2p | IMa2p | milou | 2015-08-09 | GPL v3
Bioinformatics misc | IMPUTE2 | IMPUTE2 | bianca irma milou rackham snowy | 2.3.2 | "As is" open source
Bioinformatics misc | ITSx | None | None | 1.0.9, 1.0.11, 1.1-beta, .itsx | None
Bioinformatics misc | KAT | KAT | bianca irma rackham snowy | 2.0.4, 2.0.6, 2.0.8, 2.1.1, 2.3.4, 2.4.2, 2.4.2_py3.5.0, 2.4.2_py3.7.2 | GPL v3
Bioinformatics misc | KmerGenie | None | None | 1.6741, 1.7039, .kmergenie | None
Bioinformatics misc | Kraken | None | rackham irma bianca miarka snowy | 0.10.5-beta, 1.0, 1.1-352e780, 1.1.1, 1.1.1-20210927-375654f | GPL v3
Bioinformatics misc | Kraken2 | Kraken2 | bianca irma rackham snowy | 2.0.8-beta | MIT
Bioinformatics misc | Krona | Krona | bianca miarka milou rackham snowy | 2.7 | Custom open source
Bioinformatics misc | LASER | LASER | bianca irma rackham snowy | 2.01, 2.02, 2.04 | GPL v3
Bioinformatics misc | LatentStrainAnalysis | LatentStrainAnalysis | milou | 20160322 | MIT
Bioinformatics misc | LDhelmet | LDhelmet | bianca irma rackham snowy | 1.7, 1.9, 1.10 | GPL v3
Bioinformatics misc | LTR_Finder | LTR_Finder | bianca irma rackham snowy | 1.0.5, 1.0.7 | Free for non-commercial use
Bioinformatics misc | LUMPY | LUMPY | rackham irma bianca snowy | 0.2.12, 0.2.13, 0.2.13-97cf18c, 0.2.13-213a417, 0.3.0 | MIT
Bioinformatics misc | MACE | MACE | milou | 1.2 | None
Bioinformatics misc | MACS | MACS | bianca miarka rackham snowy | 2.1.0, 2.1.2, 2.2.6, 3.0.0a6 | BSD 3-clause
Bioinformatics misc | MAGeCK | MAGeCK | rackham irma bianca snowy | 0.5.6, 0.5.9.4 | BSD 3-clause
Bioinformatics misc | medaka | medaka | rackham irma bianca snowy | 0.7.1 | MPL 2.0
Bioinformatics misc | Meerkat | Meerkat | milou | 0.189 | Misc non-commercial open source
Bioinformatics misc | MetaBat | MetaBat | bianca irma milou rackham snowy | 0.26.3, 2.12.1 | BSD-like
Bioinformatics misc | METAL | METAL | rackham miarka bianca snowy | 2011-03-25 | open source
Bioinformatics misc | MetaSV | MetaSV | bianca irma milou rackham snowy | 0.5.4 | BSD 2-clause
Bioinformatics misc | Metaxa2 | Metaxa2 | rackham irma bianca snowy | 2.1.3, 2.2 | GPL v3
Bioinformatics misc | MethPipe | MethPipe | rackham irma bianca snowy | 3.4.3, 4.1.1 | GPL v3+
Bioinformatics misc | MethylDackel | MethylDackel | rackham irma bianca snowy | 0.2.1, 0.5.1 | MIT
Bioinformatics misc | Mothur | Mothur | bianca miarka rackham snowy | 1.40.5, 1.41.0 | GPL v3
Bioinformatics misc | ms | ms | rackham irma bianca snowy | 20071014 | open source
Bioinformatics misc | MultiQC | MultiQC | bianca irma rackham snowy | 0.6, 0.7, 0.8, 1.8, 1.9, 1.10 | MIT
Bioinformatics misc | NanoComp | NanoComp | bianca irma rackham snowy | 1.9.2 | GPL v3
Bioinformatics misc | NanoPlot | NanoPlot | rackham irma bianca snowy | 1.33.1 | GPL3 license
Bioinformatics misc | NPStat | NPStat | rackham irma bianca snowy | 1 | GPL v3 license
Bioinformatics misc | nseg | nseg | rackham irma bianca snowy | 1.0.1 | Public Domain
Bioinformatics misc | ont_fast5_api | ont_fast5_api | rackham irma bianca snowy | 3.1.6 | Mozilla Public License 2.0
Bioinformatics misc | PCAngsd | PCAngsd | bianca miarka rackham snowy | 0.982 | GPL v3
Bioinformatics misc | PennCNV | PennCNV | milou | 20151014 | Public domain
Bioinformatics misc | phantompeakqualtools | phantompeakqualtools | bianca miarka milou rackham snowy | 1.1 | MIT
Bioinformatics misc | phaser | phaser | rackham irma bianca snowy | 20210423-5d4926d | GNU
Bioinformatics misc | piper | piper | bianca miarka rackham snowy | 1.5.1 | MIT
Bioinformatics misc | pizzly | pizzly | rackham irma bianca snowy | 0.37.3 | BSD 2-clause
Bioinformatics misc | plink2 | plink2 | rackham irma bianca snowy | 2.00-alpha-2-20180704, 2.00-alpha-2-20190429, 2.00-alpha-2.3-20200124 | GPL v3
Bioinformatics misc | Porechop | Porechop | rackham irma bianca snowy | 0.2.4 | GPL v3
Bioinformatics misc | prodigal | prodigal | bianca irma milou rackham snowy | 2.6.3 | GPL v3
Bioinformatics misc | PRSice | PRSice | rackham irma bianca snowy | 2.2.11.b | GPL3
Bioinformatics misc | psmc | psmc | rackham irma bianca snowy | 0.6.5-r67-e5f7df5 | MIT
Bioinformatics misc | RECON | RECON | rackham irma bianca snowy | 1.08 | GPL
Bioinformatics misc | RegScan | RegScan | rackham irma bianca snowy | 0.5 | None
Bioinformatics misc | RepeatScout | RepeatScout | rackham irma bianca snowy | 1.0.5, 1.0.6 | open source
Bioinformatics misc | schmutzi | schmutzi | rackham irma bianca snowy | 20160424, 20200706-597c6bc | GPL v3
Bioinformatics misc | SeqAn | None | bianca irma rackham snowy | 1.4.2, 2.4.0 | None
Bioinformatics misc | SeqKit | SeqKit | rackham irma bianca snowy | 0.15.0 | MIT license
Bioinformatics misc | seqmonk | seqmonk | bianca irma milou rackham snowy | 0.21.0, 0.27.0, 0.32.1, 0.34.1, 1.36.0, 1.37.1 | GPL
Bioinformatics misc | SMC++ | SMC++ | rackham irma bianca snowy | 1.15.2 | GPL v3
Bioinformatics misc | snippy | snippy | rackham miarka bianca snowy | 4.0, 4.0.5, 4.6.0 | GPL v2 license
Bioinformatics misc | spaceranger | spaceranger | bianca irma rackham snowy | 1.0.0, 1.2.0 | other
Bioinformatics misc | SweeD | SweeD | rackham irma bianca snowy | 4.0.0 | GPL v3 license
Bioinformatics misc | tabix | tabix | bianca miarka rackham snowy | 0.2.6 | MIT/Expat and modified 3-clause BSD
Bioinformatics misc | Tombo | Tombo | rackham irma bianca snowy | 1.5.1 | Mozilla Public License 2.0
Bioinformatics misc | vawk | vawk | rackham irma bianca snowy | 0.0.1 | None
Bioinformatics misc | velocyto | velocyto | rackham irma bianca snowy | 0.17.17 | BSD 2-clause
Bioinformatics phylogeny | CONSEL | CONSEL | bianca irma milou rackham snowy | 0.20 | None
Bioinformatics phylogeny | ExaBayes | ExaBayes | bianca irma rackham snowy | 1.5, 1.5-mpi | GPL v3
Bioinformatics phylogeny | FastML | FastML | bianca irma rackham snowy | 3.1, 3.11 | GPL v2+
Bioinformatics phylogeny | FastTree | FastTree | bianca irma milou rackham snowy | 2.1.8, 2.1.10 | Open source "as is"
Bioinformatics phylogeny | HyPhy | HyPhy | rackham irma bianca snowy | 2.5.0, 2.5.0-mpi | Custom open-source "as is"
Bioinformatics phylogeny | MEGAN | None | None | 4.70.4, 5.1.5, 5.11.3, 6.3.5, 6.10.5, 6.20.17, .MEGAN | None
Bioinformatics phylogeny | raxml | None | None | 7.0.4, 7.2.7, 7.2.8, 7.3.0, 7.4.7, 8.0.20, 8.0.20-mpi, 8.2.0-gcc, 8.2.0-gcc-mpi, 8.2.0-icc, 8.2.0-icc-mpi, 8.2.0_gcc, 8.2.0_icc, 8.2.4-gcc, 8.2.4-gcc-mpi, 8.2.10-gcc, 8.2.10-gcc-mpi, 8.2.12-gcc, 8.2.12-gcc-mpi, .raxml | None
Bioinformatics phylogeny | RAxML-NG | RAxML-NG | bianca irma rackham snowy | 0.9.0-mpi | GNU Affero GPL v3.0
Bioinformatics pipelines | fermikit | fermikit | bianca irma milou rackham snowy | 0.14-prerelease-96f7820, r178 | None
Bioinformatics pipelines | nf-core | nf-core | rackham irma bianca snowy | 1.12.1, 1.14, 2.1 | MIT License
Bioinformatics pipelines | nf-core-pipelines | nf-core-pipelines | rackham irma bianca snowy | latest | MIT License
Bioinformatics sw collections | 454-dataanalysis | 454-dataanalysis | bianca irma milou rackham snowy | 2.3, 2.5.3, 2.6, 2.9 | None
Bioinformatics sw collections | 454-dataprocessing | 454-dataprocessing | bianca irma milou rackham snowy | 2.3 | None
Bioinformatics sw collections | ART | ART | rackham irma bianca snowy | 2016-06-05 | None
Bioinformatics sw collections | BEDOPS | BEDOPS | rackham irma bianca snowy | 2.4.3, 2.4.28, 2.4.39 | GPL v2
Bioinformatics sw collections | BEDTools | BEDTools | rackham irma bianca snowy | 2.21.0, 2.23.0, 2.25.0, 2.26.0, 2.27.1, 2.29.2 | MIT
Bioinformatics sw collections | BioScope | BioScope | bianca irma rackham snowy | 1.3.1 | Commercial
Bioinformatics sw collections | CASAVA | CASAVA | bianca miarka milou rackham snowy | 1.7.0, 1.8.2 | None
Bioinformatics sw collections | cellranger | None | bianca irma rackham snowy | 1.1.0, 1.3.0, 2.0.2, 2.2.0, 3.0.1, 4.0.0, 5.0.1, 6.0.2 | None
Bioinformatics sw collections | cellranger-ARC | cellranger-ARC | bianca irma rackham snowy | 1.0.0 | other
Bioinformatics sw collections | cellranger-DNA | cellranger-DNA | bianca irma rackham snowy | 1.1.0 | other
Bioinformatics sw collections | GATK | GATK | bianca irma milou rackham snowy | 3.5.0, 3.6, 3.7, 3.8-0, 4.1.0.0, 4.1.1.0, 4.beta.5, build-20160727 | BSD Style
Bioinformatics sw collections | GATK-Queue | GATK-Queue | bianca irma milou rackham snowy | 3.2.2, 3.6, 3.7, 3.8-0 | MIT + academic non-commercial
Bioinformatics sw collections | GenomeSTRiP | GenomeSTRiP | bianca irma milou rackham snowy | 2.00.1650, 2.00.1685, 2.00.1710 | Custom "as is" redistributable
Bioinformatics sw collections | HiSeq | HiSeq | bianca irma milou rackham snowy | 0.9 | None
Bioinformatics sw collections | longranger | longranger | bianca irma rackham snowy | 2.0.1, 2.1.1, 2.1.2, 2.1.4, 2.2.2 | other
Bioinformatics sw collections | MEMEsuite | MEMEsuite | bianca irma rackham snowy | 5.0.1 | BSD Style
Bioinformatics sw collections | supernova | None | bianca irma rackham snowy | 1.0.0, 1.1.1, 1.1.4, 2.0.0, 2.1.1 | None
Chemistry/physics | ABINIT | ABINIT | bianca irma rackham snowy | 8.10.3 | GPL v3
Chemistry/physics | ALPS | ALPS | rackham irma bianca snowy | 2.3.0 | ALPS Library License version 1.0, ALPS Application License version 1.0
Chemistry/physics | DOCK | DOCK | bianca irma milou rackham | 3.7 | Free Academic License
Chemistry/physics | gromacs | None | None | 4.5.5, 4.5.5_intel, 4.5.7.th, 4.6.3, 4.6.3_rackham, 4.6.5, 4.6.5.th, 4.6.5_rackham, 4.6.7.th, 4.6.7.th.dp, 5.0.4, 5.0.4_rackham, 5.0.7, 5.1.1, 5.1.1_rackham, 5.1.5, 2016.1, 2016.6, 2018.6, 2018.6.th, 2019.1, 2019.1.th, 2019.6.th, 2020-GPU, 2021.1.th, .4.6 | None
Chemistry/physics | molcas | molcas | milou | 7.8.082, 8.0.15-03-08 | user group license
Chemistry/physics | molden | molden | rackham | 5.1 | None
Chemistry/physics | TmoleX | TmoleX | bianca irma milou rackham | 18 | proprietary group license
Compilers and build tools | ant | ant | bianca irma rackham snowy | 1.9.8, 1.10.0 | Apache 2.0
Compilers and build tools | autoconf | autoconf | bianca irma milou rackham snowy | 2.68, 2.69 | GPL
Compilers and build tools | automake | automake | bianca irma rackham snowy | 1.14.1, 1.16.1 | GPL
Compilers and build tools | binutils | binutils | rackham bianca miarka snowy | 2.26, 2.28, 2.38, 2.39, 2.41 | GPL v3
Compilers and build tools | ddt | ddt | bianca irma rackham snowy | 3.2.1, 5.0.1, 6.0, 6.1, 7.0 | Commercial
Compilers and build tools | flex | flex | rackham irma bianca snowy | 2.6.4 | modified BSD
Compilers and build tools | fpc | fpc | rackham irma bianca snowy | 3.0.4 | GPL and LGPL
Compilers and build tools | git | git | rackham bianca miarka snowy | 2.5.0, 2.10.2, 2.16.1, 2.21.0, 2.24.0, 2.28.0, 2.34.1, 2.44.0 | LGPL 2.1
Compilers and build tools | git-lfs | git-lfs | rackham bianca miarka snowy | 2.9.1, 3.5.1 | MIT
Compilers and build tools | guile | guile | bianca irma milou rackham snowy | 1.8.8 | LGPL 2.1
Compilers and build tools | java | java | bianca irma milou rackham snowy | jdk, OpenJDK, OpenJDK_11.0.2, OpenJDK_12+32, OpenJDK_17+35, OracleJDK, OracleJDK_11.0.9, sun_jdk1.6.0_04, sun_jdk1.6.0_18, sun_jdk1.6.0_45, sun_jdk1.7.0_25, sun_jdk1.8.0_40, sun_jdk1.8.0_92, sun_jdk1.8.0_151 | Other
Compilers and build tools | julia | julia | rackham bianca miarka snowy | 0.3.0-prerelease+3043, 0.3.11, 0.4.6, 1.1.1, 1.4.2, 1.6.1, 1.9.3 | MIT
Compilers and build tools | libtool | libtool | bianca irma milou rackham snowy | 2.4.6 | GPL
Compilers and build tools | maven | maven | bianca irma rackham snowy | 3.6.0 | Apache 2.0
Compilers and build tools | meson | meson | rackham bianca miarka snowy | 0.49.2, 0.57.2, 1.1.0 | None
Compilers and build tools | mono | mono | bianca irma milou rackham snowy | 3.12.0, 5.8.1.0, 5.10.1.27 | Custom open source
Compilers and build tools | ninja | ninja | rackham irma bianca snowy | 1.9.0, 1.10.0 | Apache 2.0
Compilers and build tools | patchelf | patchelf | bianca miarka milou rackham snowy | 0.1, 0.8 | GPL v3
Compilers and build tools | perl | perl | rackham bianca miarka snowy | 5.18.2, 5.18.4, 5.22.2, 5.24.0, 5.24.1, 5.26.2, 5.32.1 | GPL v1+ or Artistic License
Compilers and build tools | perl6 | perl6 | bianca irma rackham snowy | rakudo-star-2017.04, rakudo-star-2019.03 | Artistic License 2.0
Compilers and build tools | perl_modules | perl_modules | rackham bianca miarka snowy | 5.18.4, 5.24.1, 5.26.2, 5.32.1 | GPL v1+ or Artistic License
Compilers and build tools | python | None | None | 2.5, 2.6, 2.6.1, 2.6.5, 2.6.6, 2.7, 2.7.1, 2.7.2, 2.7.4, 2.7.6, 2.7.9, 2.7.11, 2.7.15, 2.7i, 3.1, 3.1.2, 3.1.3, 3.2, 3.2.4, 3.3, 3.3.1, 3.4.3, 3.5.0, 3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5, 3.10.8, 3.11.8, 3.12.1, 3.12.7, python-rpath.tcl, python-set-LD_LIBRARY_PATH.tcl, python-set-LD_LIBRARY_PATH_new.tcl | None
Compilers and build tools | python3 | None | None | 3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5, 3.11.4, 3.11.8, 3.12.1, 3.12.7, python3-rpath.tcl | None
Compilers and build tools | ruby | ruby | bianca irma rackham snowy | 2.4.1, 2.5.0, 2.6.2 | Ruby license
Compilers and build tools | subversion | None | rackham irma bianca snowy | 1.9.3, 1.10.6 | Apache License Version 2.0
Engineering | matlab | None | bianca rackham snowy | 7.4, 7.8, 7.10, 7.13, 8.0, 8.1, .matlab, .matlab_ny, .matlab_ny~, R2014a, R2015a, R2015b, R2016a, R2017a, R2018a, R2018b, R2019a, R2020b | None
Geospatial | CDO | CDO | rackham irma bianca snowy | 1.9.5, 1.9.7.1, 1.9.7.1-intel18.3 | GPL v2
Geospatial | ecCodes | ecCodes | rackham irma bianca snowy | 2.13.1 | None
Geospatial | FYBA | FYBA | bianca irma milou rackham snowy | 4.1.1 | Custom open-source "as is"
Geospatial | GEOS | GEOS | rackham bianca miarka snowy | 3.5.0, 3.9.1-gcc9.3.0, 3.12.0-gcc12.3.0 | LGPL v2.1
Geospatial | GOTM | GOTM | bianca irma rackham snowy | 5.3-221-gac7ec88d | GPL v2
Geospatial | libgeotiff | libgeotiff | rackham bianca miarka snowy | 1.4.1, 1.4.3, 1.7.1 | Mixed
Geospatial | Magics | Magics | rackham irma bianca snowy | 3.3.1, 3.3.1-intel18.3 | None
Geospatial | NCO | NCO | bianca irma rackham snowy | 4.8.1, 4.9.2, 4.9.3 | BSD
Geospatial | PROJ | None | snowy rackham bianca irma | 6.3.2, 8.1.0 | MIT
Geospatial | PROJ.4 | PROJ.4 | bianca irma rackham snowy | 4.9.2, 4.9.3, 4.9.3-intel18.3 | MIT
Geospatial | QGIS | QGIS | rackham bianca miarka snowy | 3.4.12, 3.32.3 | GPL license
Geospatial | SHAPELIB | SHAPELIB | rackham irma bianca snowy | 1.5.0 | None
Libraries | ATLAS | ATLAS | bianca irma milou rackham snowy | 3.10.3 | BSD-style
Libraries | blas | blas | bianca irma milou rackham snowy | 3.6.0 | Open source
Libraries | boost | boost | rackham bianca miarka snowy | 1.41.0_gcc9.3.0, 1.44.0, 1.45.0, 1.55.0, 1.55.0_gcc4.8.3, 1.58.0-gcc8.3.0, 1.59.0_gcc4.9.2, 1.59.0_intel15.3, 1.60.0_gcc5.3.0, 1.61.0_gcc5.3.0, 1.63.0_gcc6.3.0, 1.63.0_gcc6.3.0_mpi2.0.2, 1.63.0_intel17.1, 1.66.0, 1.66.0-gcc8.3.0, 1.70.0_gcc9.1.0, 1.70.0_gcc9.1.0_mpi3.1.3, 1.70.0_gcc9.3.0, 1.70.0_gcc9.3.0_mpi3.1.5, 1.70.0_intel18.3, 1.70.0_intel18.3_intelmpi18.3, 1.70.0_intel18.3_mpi3.1.3, 1.75.0-gcc9.3.0, 1.78.0_gcc11.2.0, 1.78.0_gcc11.2.0_mpi4.1.2, 1.79.0_gcc11.2.0_mpi4.1.2, 1.81.0-gcc10.3.0, 1.83.0-gcc12.3.0, 1.83.0-gcc12.3.0-mpi4.1.5 | Boost licence
Libraries | bzip2 | bzip2 | rackham irma bianca miarka snowy | 1.0.6, 1.0.8 | Custom open-source "AS IS"
Libraries | cairo | cairo | rackham irma bianca snowy | 1.14.8, 1.14.12, 1.17.2, 1.17.4 | LGPL v2.1 or Mozilla Public License 1.1
Libraries | deal.II | deal.II | bianca irma rackham snowy | 9.1.1-gcc, 9.1.1-intel | GNU LGPL v2.1 or later
Libraries | fftw | fftw | rackham irma bianca snowy | 3.3.8 | GPL
Libraries | freetype | freetype | rackham bianca miarka snowy | 2.6, 2.7.1, 2.10.1, 2.12.1 | Freetype, GPL v2
Libraries | giflib | giflib | bianca irma milou rackham snowy | 5.1.4 | Custom "as is"
Libraries | glpk | glpk | bianca irma rackham snowy | 4.63, 4.65 | GPL
Libraries | gsl | gsl | rackham bianca miarka snowy | 1.16, 2.1, 2.3, 2.5, 2.6, 2.7 | GPL
Libraries | hdf4 | hdf4 | rackham irma bianca snowy | 4.2.11_gcc4.9.2, 4.2.14-gcc6.3.0 | None
Libraries | hdf5 | hdf5 | rackham bianca miarka snowy | 1.8.16_gcc4.9.2, 1.8.16_gcc5.3.0, 1.8.18, 1.8.18_gcc6.3.0, 1.10.1, 1.10.5, 1.10.5-threadsafe-intel18.3, 1.10.9, 1.14.0 | HDF5 License
Libraries | jemalloc | jemalloc | rackham bianca miarka snowy | 3.6.0, 5.0.1, 5.3.0 | Custom open-source
Libraries | libcurl | libcurl | rackham bianca miarka snowy | 7.45.0, 8.4.0 | None
Libraries | libharu | libharu | bianca irma milou rackham snowy | 2.3.0 | ZLIB/LIBPNG License
Libraries | libwebp | libwebp | rackham bianca miarka snowy | 1.0.3, 1.2.0, 1.3.0 | BSD 3-clause
Libraries | lpsolve | lpsolve | rackham irma bianca snowy | 5.5.2.9 | LGPL v2
Libraries | netcdf | netcdf | bianca irma rackham snowy | 4.7.1, 4.7.1-intel18.3 | Custom open source "as is"
Libraries | NLopt | NLopt | rackham irma bianca snowy | 2.6.1 | MIT
Libraries | openblas | openblas | rackham bianca miarka snowy | 0.2.14a, 0.2.19, 0.2.19-singlethread, 0.2.20, 0.2.20-openmp, 0.2.20-singlethread, 0.3.21, 0.3.26 | BSD 3-clause
Libraries | pcre | pcre | bianca irma milou rackham snowy | 8.40 | BSD 3-clause
Libraries | PLplot | PLplot | rackham irma bianca snowy | 5.15.0, 5.15.0-old_wxWidgets | LGPL v2
Libraries | Poppler | Poppler | rackham bianca miarka snowy | 0.43.0, 0.54.0, 0.75.0, 23.02.0, 23.09.0 | GPL v2
Libraries | protobuf | protobuf | rackham bianca miarka snowy | 3.11.4, 24.3-gcc12.3.0 | Google open-source AS IS
Libraries | pslib | pslib | rackham irma bianca snowy | 0.4.6 | GPL and LGPL
Libraries | slurm-drmaa | slurm-drmaa | rackham bianca miarka snowy | 1.1.2-slurm19.05.8, 1.1.4-slurm23.02.5 | GPL-3.0
Libraries | sparsehash | None | rackham irma bianca miarka snowy | 2.0.2, 2.0.3, 2.0.4 | BSD 3-clause
Libraries | szip | None | milou snowy | 2.1.1, 2.1_gcc4.9.2, 2.1_gcc5.3.0 | None
Libraries | tbb | tbb | milou | 4.4u1_gcc4.9.2, 4.4u1_intel15.3 | GPL v2
Libraries | UDUNITS | UDUNITS | bianca irma rackham snowy | 2.2.26 | Custom open source "as is"
Libraries | wxWidgets | wxWidgets | rackham irma bianca snowy | 3.1.3-gtk2, 3.1.3-gtk3, 3.1.3-qt5 | wxWindows Library Licence 3.1, https://github.com/wxWidgets/wxWidgets/blob/master/docs/licence.txt
Libraries | Yeppp | Yeppp | bianca irma milou rackham snowy | 1.0.0 | Custom open source "as is"
Libraries | zlib | zlib | rackham bianca miarka snowy | 1.2.8, 1.2.11, 1.2.13, 1.3 | None
Misc | ABSOLUTE | ABSOLUTE | rackham bianca miarka snowy | 2023-6c98496 | BSD 3
Misc | AdapterRemoval | AdapterRemoval | rackham bianca miarka snowy | 2.3.1, 2.3.4 | GPLv3
Misc | AFNI | AFNI | rackham bianca miarka snowy | 24.3.08 | None
Misc | AGAT | AGAT | rackham bianca miarka snowy | 1.0.0, 1.3.2 | GPL v3
Misc | alleleCount | alleleCount | rackham bianca miarka snowy | 4.2.1 | AGPL v3
Misc | ANGSD | ANGSD | rackham bianca miarka snowy | 0.700, 0.902, 0.915, 0.940-stable | GPL v2
Misc | any2fasta | any2fasta | rackham bianca miarka snowy | 0.4.2 | GPL v3.0
Misc | Arlequin | Arlequin | rackham irma bianca miarka snowy | 3.5.2.2 | None
Misc | ARPIP | ARPIP | rackham bianca miarka snowy | 2023.10.02-ee32c10 | None
Misc | bamtools | bamtools | rackham bianca miarka snowy | 2.5.2 | MIT
Misc | bamUtil | bamUtil | rackham bianca miarka snowy | 1.0.15 | GPL v3
Misc | bcftools | bcftools | rackham bianca miarka snowy | 1.2, 1.19 | None
Misc | Beagle | Beagle | rackham bianca miarka snowy | 4.1 | GPL v3
Misc | bgen | bgen | rackham bianca miarka snowy | 1.1.6 | Boost Software License v1.0
Misc | BioBakery | BioBakery | rackham bianca miarka snowy | 3.0, 3.1, 3.8 | MIT license
Misc | BioKIT | BioKIT | rackham irma bianca miarka snowy | 0.0.9 | Other-d
Misc | BioPerl | None | None | 1.6.1, 1.6.1_PERL5.10.1, 1.6.1_PERL5.12.3, 1.6.922, 1.6.923_Perl5.18.4, 1.7.8-perl5.32.1, .BioPerl | None
Misc | BUSCO | BUSCO | rackham bianca miarka snowy | 4.1.4, 5.3.1, 5.5.0, 5.7.1 | MIT
Misc | Cellsnp-lite | Cellsnp-lite | rackham irma bianca miarka snowy | 1.2.2 | Apache License 2-0
Misc | CHEUI | CHEUI | rackham bianca miarka snowy | 20230518-c1c9ab6 | GPL
Misc | Chromium-cellranger-ATAC | cellranger-ATAC | rackham bianca miarka snowy | 1.2.0, 2.0.0, 2.1.0 | None
Misc | Citup | Citup | rackham irma bianca miarka snowy | 0.1.0 | Other-d
Misc | CRABS | CRABS | rackham irma bianca miarka snowy | 0.1.2 | MIT License-d
Misc | CRISPResso | CRISPResso | rackham bianca miarka snowy | 1.0.7, 2.3.1 | None
Misc | DamageProfiler | DamageProfiler | rackham bianca miarka snowy | 1.1 | GPL v3
Misc | datamash | datamash | rackham bianca miarka snowy | 1.8 | None
Misc | DATES | DATES | rackham bianca miarka snowy | 4010 | None
Misc | dds-cli | dds-cli | rackham bianca miarka snowy milou transit | latest | Other
Misc | deepTools | deepTools | rackham bianca miarka snowy | 3.5.5 | mixed open-source
Misc | EAGLE | EAGLE | rackham bianca miarka snowy | 1.1.3 | GPL v3
Misc | FAN-C | FAN-C | rackham bianca miarka snowy | 0.9.26 | None
Misc | FastANI | FastANI | rackham bianca miarka snowy | 1.33, 1.34 | Apache-2.0
Misc | fastK | fastK | rackham bianca miarka snowy | 1.1.0 | AS IS
Misc | fastp | fastp | rackham bianca miarka snowy | 0.23.4 | MIT
Misc | fgbio | fgbio | rackham bianca miarka snowy | 2.2.1-0 | MIT
Misc | fineRADstructure | fineRADstructure | rackham bianca miarka snowy | 0.3.1 | Creative Commons 3.0
Misc | FLAMES | FLAMES | rackham bianca miarka snowy | 20221109-774e16a | GPL v3
Misc | FLASH | FLASH | rackham bianca miarka snowy | 2.2.00 | GPL v3
Misc | freebayes | freebayes | rackham bianca miarka snowy | 1.3.6, 1.3.8 | MIT
Misc | fsl | fsl | rackham bianca miarka snowy | 6.0, 6.0.3 | None
Misc | gffread | gffread | rackham bianca miarka snowy | 0.12.7 | MIT
Misc | GLIMPSE | GLIMPSE | rackham bianca miarka snowy | 1.1.1, 2.0.0 | MIT
Misc | grenedalf | grenedalf | rackham bianca miarka snowy | 0.3.0, 0.5.1, 0.6.0 | GPL3
Misc | GTDB-Tk | GTDB-Tk | rackham bianca miarka snowy | 0.3.2, 1.5.0, 2.3.2, 2.4.0 | GPLv3
Misc | Guppy | Guppy | snowy miarka bianca | 5.0.16-cpu, 5.0.16-gpu, 6.0.6-cpu, 6.0.6-gpu, 6.3.7-cpu, 6.3.7-gpu, 6.4.2-cpu, 6.4.2-gpu, 6.5.7-cpu, 6.5.7-gpu | Custom
Misc | halla | halla | rackham bianca miarka snowy | 0.8.20 | MIT
Misc | HiCExplorer | HiCExplorer | rackham bianca miarka snowy | 2.2-beta, 3.7.3 | GPL v3
Misc | htslib | htslib | rackham bianca miarka snowy | 1.2, 1.19 | None
Misc | HUMAnN | HUMAnN | rackham bianca miarka snowy | 3.6, 3.8 | MIT license
Misc | IGVtools | IGVtools | rackham bianca miarka snowy | 2.8.13, 2.16.0 | MIT
Misc | Juicebox | Juicebox | rackham irma bianca miarka snowy | 1.11.08 | MIT License
Misc | KING | KING | rackham bianca miarka snowy | 2.3.2 | None
Misc | kingfisher | kingfisher | rackham bianca miarka snowy | 0.3.0 | GPL v3
Misc | KMC | KMC | rackham bianca miarka snowy | 3.2.2 | GPL v3
Misc | KneadData | KneadData | rackham bianca miarka snowy | 0.12.0 | MIT license
Misc | Kraken2 | Kraken2 | rackham bianca miarka snowy | 2.1.3-20231102-acc2248 | MIT
Misc | KrakenUniq | KrakenUniq | rackham bianca miarka snowy | 0.6, 1.0.0, 1.0.1 | GPLv3, MIT
Misc | Krona | Krona | rackham bianca miarka snowy | 2.7.1, 2.8.1-20211222-d1479b3 | None
Misc | ldsc | ldsc | rackham bianca miarka snowy | 1.0.0, 1.0.1, 2.0.1 | GPL v3
Misc | MACS | MACS | rackham bianca miarka snowy | 1.4.1, 1.4.2, 3.0.0b1 | BSD 3-clause
Misc | MAGeCK2 | MAGeCK2 | rackham irma bianca miarka snowy | 20211209-435eacd | BSD3
Misc | mash | mash | rackham bianca miarka snowy | 2.0, 2.3, 2.3-20210519-41ddc61 | Custom open-source
Misc | meryl | meryl | rackham bianca miarka snowy | 1.4.1 | Mixed open source
Misc | METAL | METAL | rackham bianca miarka snowy | 2020-05-05 | open source
Misc | MetaXcan | MetaXcan | rackham irma bianca miarka snowy | 20210925-cfc9e36 | MIT License-d
Misc | metilene | metilene | rackham bianca miarka snowy | 0.2-8 | GPL v2
Misc | mgatk | mgatk | rackham bianca miarka snowy | 0.7.0 | MIT
Misc | Minimac4 | Minimac4 | rackham bianca miarka snowy | 4.1.6 | GPL-3.0
Misc | mirdeep2 | mirdeep2 | rackham bianca miarka snowy | 2.0.1.3-20220221-c6440e2 | GPL v3
Misc | miRDP2 | miRDP2 | rackham bianca miarka snowy | 1.1.4, 1.1.5 | GPL v3
Misc | modkit | modkit | rackham bianca miarka snowy | 0.2.5-rc2, 0.3.1, 0.3.3, 0.4.1 | GPL v3
Misc | mosdepth | mosdepth | rackham bianca miarka snowy | 0.3.3 | MIT
Misc | Mothur | Mothur | rackham bianca miarka snowy | 1.25.1, 1.30.1, 1.33.3, 1.36.1, 1.38.1, 1.48.0 | None
Misc | MRIcroGL | MRIcroGL | rackham bianca miarka snowy | 1.2.20220720 | BSD 2
Misc | msisensor-pro | msisensor-pro | rackham bianca miarka snowy | 1.2.0 | None
Misc | MultiQC | MultiQC | rackham bianca miarka snowy | 1.22.2 | MIT
Misc | NCBI-datasets | NCBI-datasets | rackham bianca miarka snowy | 15.29.0, 16.35.0 | Public Domain, US Government
Misc | ont_h5_validator | ont_h5_validator | rackham bianca miarka snowy | 2.0.1 | None
Misc | picard | picard | rackham bianca miarka snowy | 3.1.1 | MIT
Misc | Pisces | Pisces | rackham bianca miarka snowy | 5.3.0.0 | GPL 3
Misc | plink2 | plink2 | rackham bianca miarka snowy | 2.00-alpha-3.7-20221024, 2.00-alpha-5-20230923 | GPL v3
Misc | preseq | preseq | rackham bianca miarka snowy | 3.2 | GPL v3
Misc | PROJ | PROJ | rackham bianca miarka snowy | 9.1.1 | MIT
Misc | PyClone-VI | PyClone-VI | rackham irma bianca miarka snowy | 20210623-6607ea1 | GPL V3-d
Misc | pyega3 | pyega3 | rackham bianca miarka snowy | 5.1.0 | None
Misc | Raremetal | Raremetal | rackham bianca miarka snowy | 4.15.1 | None
Misc | regenie | regenie | rackham bianca miarka snowy | 3.4.1 | MIT
Misc | removethis | removethis | rackham irma bianca miarka snowy | 1 | None
Misc | RepeatModeler | RepeatModeler | rackham bianca miarka snowy | 2.0.4 | None
Misc | RGT | RGT | rackham bianca miarka snowy | 1.0.2 | GPL v3
Misc | rtgcore | rtgcore | rackham bianca miarka snowy | 3.12.1 | Custom
Misc | rtgtools | rtgtools | rackham bianca miarka snowy | 3.12.1 | BSD 2-clause
Misc | samtools | samtools | rackham bianca miarka snowy | 1.2, 1.19 | None
Misc | schmutzi | schmutzi | rackham bianca miarka snowy | 1.5.7 | GPL v3
Misc | scikit-allel | scikit-allel | rackham bianca miarka snowy | 1.3.5 | None
Misc | scvi-tools | scvi-tools | rackham bianca miarka snowy | 1.0.4 | BSD-3
Misc | SeqKit | SeqKit | rackham bianca miarka snowy | 2.4.0 | MIT
Misc | SeqLib | SeqLib | rackham bianca miarka snowy | 1.2.0 | Apache 2.0
Misc | seqstats | seqstats | rackham bianca miarka snowy | 20170404-e6f482f | MIT
Misc | SHAPEIT | SHAPEIT | rackham bianca miarka snowy | v4.2.2, v5.1.1 | None
Misc | slivar | slivar | rackham irma bianca miarka snowy | 0.2.7 | MIT License-d
Misc | SMC++ | SMC++ | rackham bianca miarka snowy | 1.15.4, 1.15.5.dev12+g8bdecdf | GPL v3
Misc | smudgeplot | smudgeplot | rackham bianca miarka snowy | 0.3, 0.4.0 | Apache-2.0
Misc | sracat | sracat | rackham bianca miarka snowy | 20210916-b896745 | BSD 3-Clause
Misc | sratools | sratools | rackham bianca miarka snowy | 3.0.7 | custom
Misc | Stacks | Stacks | rackham bianca miarka snowy | 2.66 | GPL v3
Misc | SVDB | SVDB | rackham bianca miarka snowy | 2.8.1 | MIT
Misc | truvari | truvari | rackham bianca miarka snowy | 4.3.1 | MIT
Misc | vcflib | vcflib | rackham bianca miarka snowy | 1.0.9 | MIT
Misc | vcfstats | vcfstats | rackham bianca miarka snowy | 0.4.2 | Other
Misc | vep | vep | rackham bianca miarka snowy | 110.1, 111.0, 113.0 | Apache-2.0
Misc | XP-CLR | XP-CLR | rackham bianca miarka snowy | 1.1.2 | MIT
Misc applications | agrep | agrep | bianca irma milou rackham snowy | 3.41.5 | ISC Open Source
Misc applications | awscli | awscli | rackham bianca miarka snowy | 1.11.140, 1.16.225, 1.29.52 | Apache 2.0
Misc applications | circos | circos | rackham irma bianca snowy | 0.69-9 | GPL
Misc applications | cmake | cmake | rackham irma bianca snowy | 3.5.1, 3.7.2, 3.13.2, 3.17.3 | BSD 3-clause
Misc applications | cowsay | cowsay | bianca irma milou rackham snowy | 3.03 | Artistic License
Misc applications | Cromwell | Cromwell | rackham bianca miarka snowy | 71, 86 | BSD 3-Clause
Misc applications | doxygen | doxygen | bianca irma milou rackham snowy | 1.8.11 | Gnu GPL 2
Misc applications | emacs | emacs | rackham bianca miarka snowy | 25.1, 25.2, 27.2, 28.2 | GNU
Misc applications | gawk | gawk | bianca irma milou rackham snowy | 4.1.4 | GPL v3
Misc applications | gdl | gdl | bianca irma rackham snowy | 1.0.0-rc.1 | GPL v2
Misc applications | GhostPDL | GhostPDL | rackham irma bianca snowy | 9.53.3 | Gnu GPL Affero v3
Misc applications | gnuplot | None | None | 4.4.3, 4.6.5, 5.0.7, 5.2.7, .gnuplot | None
Misc applications | Graphviz | Graphviz | rackham bianca miarka snowy | 2.40.1, 9.0.0 | Common Public License Version 1.0
Misc applications | groff | groff | bianca irma milou rackham | 1.22.3 | GPL
Misc applications | h5utils | h5utils | bianca irma milou rackham snowy | 1.12.1 | GPL v2+
Misc applications | haskell-stack | None | bianca irma milou rackham snowy | 1.0.4.3, 1.4.0, 1.7.1 | None
Misc applications | jq | jq | rackham miarka bianca snowy | 1.6 | MIT
Misc applications | MariaDB | MariaDB | bianca irma milou rackham snowy | 10.2.11 | GPL v2
Misc applications | mbuffer | mbuffer | bianca irma milou rackham snowy | 20151002 | GPL v3
Misc applications | mcl | mcl | rackham bianca miarka snowy | 14-137, 22-282 | GPL v3
Misc applications | metaWRAP | metaWRAP | rackham irma bianca snowy | 1.3.2 | MIT
Misc applications | openbabel | openbabel | rackham bianca miarka snowy | 3.1.1-gcc9.3.0, 3.1.1-gcc12.3.0 | GPL v2
Misc applications | OpenBUGS | OpenBUGS | rackham irma bianca snowy | 3.2.3 | GPL v2
Misc applications | p7zip | p7zip | bianca irma milou rackham snowy | 16.02 | LGPLv2
Misc applications | pandoc | pandoc | rackham irma bianca snowy | 1.16.0.2, 2.2.3.2, 2.10.1 | GPL 2
Misc applications | PostgreSQL | PostgreSQL | bianca irma milou rackham snowy | 10.3 | PostgreSQL Licence
Misc applications | povray | povray | bianca irma rackham | 3.7 | AGPL v3
Misc applications | ROOT | ROOT | rackham irma bianca snowy | 6.04.08, 6.06.08, 6.20.04 | LGPL 2.1
Misc applications | SAIGE | SAIGE | rackham irma bianca snowy | 0.42.1 | GPL v3
Misc applications | SCons | None | None | 2.5.0, .scons | None
Misc applications | sqlite | sqlite | rackham bianca miarka snowy | 3.8.5, 3.11.1, 3.16.2, 3.24.0, 3.34.0, 3.45.0 | public domain
Misc applications | swig | swig | rackham bianca miarka snowy | 3.0.7, 3.0.12, 4.1.1 | GPL with other advice
Misc applications | texinfo | texinfo | rackham bianca miarka snowy | 6.0, 6.5, 6.6, 6.8, 7.1 | GPL v3+
Misc applications | texlive | texlive | rackham bianca miarka snowy | 2015, 2016, 2018, 2019, 2021, 2023-08-14, 2024-04-24 | None
Misc applications | tinyutils | None | None | 1.1, 1.2, 1.3, 1.4, .tinyutils-1.1 | None
Misc applications | tmux | tmux | rackham bianca miarka snowy | 2.5, 3.1b, 3.3a | Custom open-source AS IS
Misc applications | vim | None | rackham irma bianca snowy | 8.0-1360, 8.1-1053, 8.2.3701 | VIM License
Misc applications | xz | xz | rackham bianca miarka snowy | 5.2.2, 5.2.6, 5.4.5 | Mixed open-source
Parallel | gnuparallel | gnuparallel | rackham bianca miarka snowy | 20140222, 20150522, 20170122, 20180822, 20230422 | GPL v3+
Phylogeny | Dsuite | Dsuite | rackham bianca miarka snowy | 0.5-r57 | open-source
Phylogeny | FastME | FastME | rackham bianca miarka snowy | 2.1.6.2, 2.1.6.4 | GPL
Phylogeny | FastTree | FastTree | rackham bianca miarka snowy | 2.1.11 | GPL v2+
Phylogeny | G-Nomix | G-Nomix | rackham bianca miarka snowy | 2022-09-18-de952a2 | free for academic use
Phylogeny | HyPhy | HyPhy | rackham bianca miarka snowy | 2.5.51-mpi | None
Phylogeny | iqtree | iqtree | rackham bianca miarka snowy | 2.2.2.6-omp-mpi | GPL v2
Phylogeny | KIN | KIN | rackham bianca miarka snowy | 3.1.3, 3.1.3-20230612-76dc469 | GPL v3
Phylogeny | MetaPhlAn4 | MetaPhlAn4 | rackham bianca miarka snowy | 4.0 | MIT license
Phylogeny | paml | paml | rackham bianca miarka snowy | 4.10.7 | other
Phylogeny | pathPhynder | None | rackham irma bianca snowy | 1.a-20221011-a407a97, 2020-12-19-b8532c0 | MIT
Phylogeny | phylip | phylip | rackham bianca miarka snowy | 3.697 | None
Phylogeny | PhyloPhlAn | PhyloPhlAn | rackham bianca miarka snowy | 3.0.3 | MIT license
Phylogeny | phyx | phyx | rackham bianca miarka snowy | 1.3 | GPL-3.0
Phylogeny | read2tree | read2tree | rackham bianca miarka snowy | 0.1.5-20240117-ff2d167 | MIT
Phylogeny | snphylo | snphylo | rackham bianca miarka snowy | 20180901 | None
Pipelines | 3D-DNA | 3D-DNA | rackham irma bianca miarka snowy | 20190801-529ccf4 | MIT License
Pipelines | biomodal | biomodal | rackham bianca miarka snowy | 1.0.2 | commercial
Pipelines | cactus_atac | cactus_atac | rackham bianca miarka snowy | 1.0.0 | MIT
Pipelines | cutadapt | cutadapt | rackham bianca miarka snowy | 4.5, 4.8 | MIT
Pipelines | GenoPredPipe | GenoPredPipe | rackham bianca miarka snowy | 20211104-02777ce, 20221121-e3caf6b | GNU GPL v3
Pipelines | happy | happy | rackham bianca miarka snowy | 1.0.0 | MIT
Pipelines | Juicer | Juicer | rackham bianca miarka snowy | 1.6, 2.0 | MIT License
Pipelines | Juicer_tools | Juicer_tools | rackham irma bianca miarka snowy | 1.6, 1.22.01 | MIT License
Pipelines | MPRASuite | MPRASuite | rackham bianca miarka snowy | 1.0.3 | MIT
Pipelines | nextNEOpi | nextNEOpi | rackham bianca miarka snowy | 1.4.0 | https://github.com/icbi-lab/nextNEOpi/blob/master/LICENSE
Pipelines | nf-core | nf-core | rackham bianca miarka snowy | 2.4.1, 2.6, latest | MIT License
Pipelines | Panaroo | Panaroo | rackham bianca miarka snowy | 1.2.10, 1.3.2 | MIT
Pipelines | ParseBiosciences-Pipeline | ParseBiosciences-Pipeline | rackham bianca miarka snowy | 1.4.0 | Custom
Pipelines | qiime2 | qiime2 | rackham bianca miarka snowy | 2024.1, 2024.2 | BSD 3-Clause
Pipelines | SALSA | SALSA | rackham irma bianca miarka snowy | 20220408-ed76685 | MIT License
Pipelines | Seurat | Seurat | rackham bianca miarka snowy | 5.0.2, 5.1.0 | MIT
Pipelines | SMRT | SMRT | rackham bianca miarka snowy | 13.0.0.207600 | None
Pipelines | snakemake | snakemake | rackham bianca miarka snowy | 5.32.2, 8.20.1 | MIT License
Pipelines | star-fusion | star-fusion | rackham bianca miarka snowy | 1.9.1, 1.10.1 | BSD 3-clause
Pipelines | ViWrap | ViWrap | rackham bianca miarka snowy | 1.3.0 | GPL v3
Statistics | nonmem | None | rackham irma bianca snowy | 7.3.0, 7.4.3, 7.4.4, 7.5.0 | Commercial
Statistics | PsN | PsN | rackham irma bianca snowy | 5.0.0 | GPL 2+
Statistics | R | None | None | 2.8, 2.8.1, 2.10, 2.10.1, 2.11.1, 2.12.1, 2.12.2, 2.13.0, 2.14.0, 2.15.0, 2.15.1, 2.15.2, 3.0.1, 3.0.2, 3.1.0, 3.2.2, 3.2.3, 3.3.0, 3.3.1, 3.3.2, 3.3.2_rackham, 3.4.0, 3.4.3, 3.5.0, 3.5.2, 3.6.0, 3.6.1, 4.0.0, 4.0.4, 4.1.1, 4.2.1, 4.3.1, 4.3.2, 4.4.1, .R, R.tcl | None
Statistics | R_packages | R_packages | rackham bianca miarka snowy | 3.3.0, 3.3.1, 3.3.2, 3.4.0, 3.4.3, 3.5.0, 3.5.2, 3.6.0, 3.6.1, 4.0.0, 4.0.4, 4.1.1, 4.3.1 | Various
Statistics | RStudio | RStudio | rackham bianca miarka snowy | 1.0.136, 1.0.143, 1.0.153, 1.1.423, 1.1.463, 1.4.1106, 2022.02.0-443, 2022.02.3-492, 2022.07.1-554, 2023.06.0-421, 2023.06.2-561, 2023.12.1-402, 2024.04.2-764 | Custom
Sw_collections | bbmap | bbmap | rackham bianca miarka snowy | 39.06, 39.08 | Public Domain
Sw_collections | BEDOPS | BEDOPS | rackham bianca miarka snowy | 2.4.41 | GPL v2
Sw_collections | BEDTools | BEDTools | rackham bianca miarka snowy | 2.20.1, 2.31.1 | MIT
Sw_collections | cellbender | cellbender | rackham bianca miarka snowy | 0.3.0 | BSD-3
Sw_collections | Chromium-cellranger | cellranger | rackham bianca miarka snowy | 4.0.0, 5.0.1, 6.0.2, 6.1.2, 7.0.0, 7.0.1, 7.1.0, 8.0.1 | None
Sw_collections | Chromium-cellranger-ARC | cellranger-ARC | rackham bianca miarka snowy | 1.0.0, 2.0.2 | https://support.10xgenomics.com/docs/license
Sw_collections | DRAGEN | DRAGEN | rackham bianca miarka snowy | 4.1.5 | None
Sw_collections | GATK | GATK | rackham bianca miarka snowy | 1.0.4105, 1.0.5365, 1.0.5909, 1.2.12, 1.4.5, 1.4.21, 1.5.11, 1.5.21, 2.1.13, 2.3.6, 2.5.2, 2.7.2, 2.8.1, 3.1.1, 3.2.0, 3.2.2, 3.3.0, 3.4-46, 3.4.0, 3.4.46, 4.0.8.0, 4.1.4.1, 4.3.0.0 | None
Sw_collections | MEMEsuite | MEMEsuite | rackham bianca miarka snowy | 4.11.1, 4.11.2_1, 5.1.1, 5.5.1 | University of California open-source AS IS
Sw_collections | SpeedPPI | SpeedPPI | rackham bianca miarka snowy | 2023.07.11-37d0a03 | None
Uncategorized | alphafold | alphafold | rackham bianca miarka snowy | 2.3.1 | None
Uncategorized | alphafold_dataset | alphafold_dataset | rackham bianca miarka snowy | 2.0.0, 2.1.1, 2.3.1 | None
Uncategorized | Amber | None | snowy | 16-GPU, 18-GPU | None
Uncategorized | amber_previous | None | None | amber.tcl, ambertools18_intel_python2 | None
Uncategorized | AmpliconNoise | None | bianca miarka rackham snowy | 1.27 | None
Uncategorized | AnnotSV | None | bianca irma rackham snowy | 1.1.1 | None
Uncategorized | annovar_data | None | bianca miarka rackham snowy | 2019.10.21, 2021.05.18 | None
Uncategorized | Ansys | Ansys | rackham bianca miarka snowy | 19.1, 19.5, 2020R1, 2023R2 | commercial software
Uncategorized | apr | None | rackham irma bianca snowy | 1.7.0 | Apache License 2.0
Uncategorized | apr-util | None | rackham irma bianca snowy | 1.6.1 | Apache License Version 2.0
Uncategorized | archspec | archspec | rackham bianca miarka snowy | 0.2.4 | Apache 2.0, MIT
Uncategorized | aria2 | aria2 | rackham bianca miarka snowy | 1.36 | OpenSSL
Uncategorized | Armadillo | None | rackham irma bianca snowy | 7.900.0, 9.700.2 | Apache License V2.0
Uncategorized | ascp | ascp | rackham bianca miarka snowy | 4.2.0.183804, 4.4.3.891 | None
Uncategorized | athena | None | bianca miarka rackham snowy | 1.1 | None
Uncategorized | BackSPIN | None | rackham irma bianca snowy | 20171211_2fbcf5d | BSD 2-Clause
Uncategorized | BAM-matcher | None | None | 20160611, .bammatcher, .bammatcher~ | None
Uncategorized | Bandage | None | milou | 0.8.0 | None
Uncategorized | bazel | bazel | rackham bianca miarka snowy | 7.0.0-pre.20230917.3 | None
Uncategorized | bcftools-MoCha | None | rackham irma bianca snowy | 1.9-20191129, 1.11-20210315, 2019-11-29 | None
Uncategorized | BclConverter | None | bianca irma milou rackham snowy | 1.7.1 | None
Uncategorized | BioBakery_data | BioBakery_data | rackham bianca miarka snowy | 3.0-20210423, 3.1-20231102 | None
Uncategorized | BisSNP | None | None | 0.82.2, .bissnp | None
Uncategorized | BLAKE2 | BLAKE2 | rackham bianca miarka snowy | 20230212-ed1974e | CC0-1.0 or OpenSSL or Apache Public Licence 2.0
Uncategorized | Bracken | None | rackham irma bianca snowy | 2.5 | GPL v3
Uncategorized | bubblewrap | bubblewrap | rackham irma bianca miarka snowy | 0.6.2 | LGPL v2+
Uncategorized | BUSCO_data | BUSCO_data | rackham bianca miarka snowy | latest | None
Uncategorized | CAMMiQ | None | rackham irma bianca miarka snowy | 20211015-6142150 | MIT License
Uncategorized | CAP3 | None | milou | 08-06-13, .CAP3, CAP3 | None
Uncategorized | capnproto | capnproto | rackham bianca miarka snowy | 1.0.2 | MIT
Uncategorized | CAT | None | rackham irma bianca snowy | 20190926-e25443 | Apache License 2.0
Uncategorized | cellranger-ARC-data | None | bianca irma rackham snowy | 2020-A | None
Uncategorized | cellranger-ATAC-data | None | bianca irma rackham snowy | 1.2.0, 2.0.0 | None
Uncategorized | cellranger-data | cellranger-data | rackham bianca miarka snowy | 1.1.0, 1.2.0, 3.0.0, 2020-A, 2024-A | None
Uncategorized | cellranger-DNA-data | None | bianca irma rackham snowy | 1.0.0 | None
Uncategorized | cellranger-VDJ-data | cellranger-VDJ-data | rackham bianca miarka snowy | 4.0.0, 5.0.0, 7.1.0 | None
Uncategorized | chain_files | chain_files | rackham bianca miarka snowy | 20230825 | Open access
Uncategorized | ChEMBL | ChEMBL | rackham bianca miarka snowy | 22.1, 33 | Creative Commons Attribution-ShareAlike 3.0 Unported license
Uncategorized | ChimeraSlayer | None | bianca miarka rackham snowy | 20110519 | None
Uncategorized | Chromium-cellranger-DNA | None | rackham irma bianca snowy | 1.1.0 | https://support.10xgenomics.com/docs/license
Uncategorized | Chromium-longranger | None | rackham irma bianca snowy | 2.2.2 | Misc non-commercial
Uncategorized | Chromium-spaceranger | None | rackham irma bianca snowy | 1.0.0 | None
Uncategorized | circlator | None | rackham irma bianca snowy | 1.5.5 | GNU General Public License v3.0
Uncategorized | clapack | clapack | rackham bianca miarka snowy | 3.2.1 | Public domain
Uncategorized | clearcut | None | bianca irma rackham snowy | 1.0.9 | None
Uncategorized | ClonalFrameML | None | bianca irma milou rackham snowy | 1.11-4f13f23 | None
Uncategorized | conda | None | rackham irma bianca snowy | latest | © Copyright 2017, Anaconda, Inc. Revision 6be5194b.
Uncategorized | coreutils | None | bianca irma milou rackham snowy | 8.25, 8.27 | None
Uncategorized | CPLEXOptimizationStudio | None | rackham irma bianca miarka snowy | 12.9.0, 20.1 | IBM Academic
Uncategorized | CROP | None | rackham irma bianca miarka snowy | 1.33 | Other-d
Uncategorized | CS-Rosetta | None | rackham irma bianca snowy | 1.01_Rosetta_3.7 | None
Uncategorized | CST_Studio | CST_Studio | rackham bianca miarka snowy | 2023.0 | None
Uncategorized | CTAT_RESOURCE_LIB | None | bianca miarka rackham snowy | 2017-11, 2018-02, 2019-08 | None
Uncategorized | cuda | cuda | rackham bianca miarka snowy | 11.2.1, 11.7.0, 11.8.0, 12.0.0, 12.1.0, 12.1.1, 12.2.0, 12.2.2 | EULA
Uncategorized | cuDNN | cuDNN | rackham bianca miarka snowy | 8.1.0.77, 8.4.1.50, 8.6.0.163, 8.7.0.84, 8.8.0.121, 8.9.2.26 | SLA
Uncategorized | CUT-RUNTools | None | rackham irma bianca miarka snowy | 2 | MIT
Uncategorized | cyrus-sasl | None | rackham irma bianca snowy | 2.1.27 | None
Uncategorized | darsync | darsync | rackham bianca miarka snowy | 20240208-7ff09d9 | None
Uncategorized | dbCAN | dbCAN | rackham bianca miarka snowy | 11 | None
Uncategorized | DBdeployer | DBdeployer | rackham irma bianca miarka snowy | latest | Apache-2
Uncategorized | ddt-perf | None | milou rackham | 7.0 | None
Uncategorized | desmond | desmond | rackham irma bianca miarka snowy | 2022-2 | gpl
Uncategorized | Dfam | Dfam | rackham bianca miarka snowy | 3.7, 3.8 | Creative Commons Zero CC0
Uncategorized | Dfam-TE-Tools | None | rackham irma bianca snowy | 1.4 | CC0 1.0 Universal
Uncategorized | DFTB+ | None | rackham snowy | 19.1 | GNU LGPL-3
Uncategorized | dlib | None | bianca irma rackham snowy | 19.15 | None
Uncategorized | dssp | None | None | 2.0.4 | None
Uncategorized | edlib | edlib | rackham bianca miarka snowy | 1.2.7 | MIT
Uncategorized | Eigen | None | bianca irma rackham snowy | 3.3.4 | None
Uncategorized | eLSA | None | bianca irma milou rackham | 20160907-febe2d7a57c8 | None
Uncategorized | ESPResSo | None | rackham | 4.1.4 | GPLv3
Uncategorized | estout | None | rackham irma bianca snowy | 20200417-d392e71 | MIT license
Uncategorized | fastq_screen_data | fastq_screen_data | rackham bianca miarka snowy | 20220330 | None
Uncategorized | freesurfer | freesurfer | rackham bianca miarka snowy | 6.0.0, 7.4.1 | Custom open-source
Uncategorized | funannotate_data | funannotate_data | rackham bianca miarka snowy | 1.8.17 | BSD-2
Uncategorized | GAAS | None | rackham irma bianca miarka snowy | 1.2.0 | GPL v3
Uncategorized | gamess | None | bianca irma rackham | 20070324R1, 20070324R1-kalkyl, 20101001R1-kalkyl-intel-12.1-mvapich2, 20110818R1-kalkyl-intel12.0-mvapich2, 20170930 | None
Uncategorized | gaussian | None | milou rackham snowy | .g09, .gaussian, .gv, .gv~, g03, g03.d02, g03.e01, g09, g09.a02, g09.c01, g09.d01 | None
Uncategorized | gaussview | None | None | 5.0.8, .gv, .gv~ | None
Uncategorized | gcc | None | None | 4.2.3, 4.3.0, 4.3.2, 4.3.2-test, 4.3.2t, 4.4, 4.4.2, 4.4.3, 4.4.4, 4.5.0, 4.6.2, gcc, gcc4.3, gcc4.4, gcc4.5, gcc4.6.tcl | None
Uncategorized | gdb | None | rackham irma bianca miarka snowy | 11.2 | Other-d
Uncategorized | gdc | None | None | 2.064.2_4.8, .gdc | None
Uncategorized | genomescope | None | rackham irma bianca snowy | 1.0.0_d2aefdd | Apache-2.0
Uncategorized | genomescope2.0 | None | rackham irma bianca snowy | 1.0.0_5034ed4 | Apache-2.0
Uncategorized | GetOrganelleDB | GetOrganelleDB | rackham bianca miarka snowy | 0.0.1, 0.0.2 | GPLv3
Uncategorized | github-cli | github-cli | rackham bianca miarka snowy | 2.63.2 | MIT
Uncategorized | glib | glib | rackham irma bianca miarka snowy | 2.72.1 | LGPL 2.1+
Uncategorized | GlimmerHMM | None | rackham irma bianca snowy | 3.0.4 | Open Source, licence file included
Uncategorized | GMP | GMP | rackham bianca miarka snowy | 6.3.0 | LGPL v3; GPL v2
Uncategorized | go | go | rackham bianca miarka snowy | 1.11.5, 1.20.3 | Go licence
Uncategorized | GoogleCloudSDK | GoogleCloudSDK | rackham bianca miarka snowy | 447.0.0, 455.0.0 | None
Uncategorized | grocsvs | None | rackham irma bianca snowy | 0.2.5 | MIT
Uncategorized | gromacs-plumed | None | rackham | 2019.4.th | None
Uncategorized | gromacs-saxs | None | rackham | 2020.04-9edbdbc | None
Uncategorized | gstreamer | None | milou | 0.10.31 | None
Uncategorized | GTDB | GTDB | rackham bianca miarka snowy | R04-RS89, R202, R214.1, R220 | None
Uncategorized | Gurobi | Gurobi | rackham bianca miarka snowy | 9.5.1, 10.0.2, 11.0.3 | Academic License
Uncategorized | gzip | gzip | rackham bianca miarka snowy | 1.12, 1.13 | GPL v3
Uncategorized | HG002_Q100_T2T_assembly | HG002_Q100_T2T_assembly | rackham bianca miarka snowy | 0.7 | CC0
Uncategorized | HHsuite | None | None | 2.0.16, 2.0.16~ | None
Uncategorized | HLA-LA | None | rackham irma bianca snowy | 1.0.1-20201001-f636b62 | None
Uncategorized | hyperqueue | hyperqueue | rackham | 0.18.0 | MIT license
Uncategorized | iGenomes | None | rackham irma bianca snowy | latest | None
Uncategorized | ImageJ | ImageJ | rackham bianca miarka snowy | 1.52j, 1.54g | None
Uncategorized | IMAP | None | rackham irma bianca snowy | 1.0 | None
Uncategorized | Immcantation | None | rackham irma bianca snowy | 4.0.0 | AGPL-3
Uncategorized | imputor | None | bianca miarka rackham snowy | 20180829 | None
Uncategorized | InterOp | None | bianca miarka rackham snowy | 1.1.4 | None
Uncategorized | jamovi | jamovi | rackham bianca miarka snowy | 2.3.21 | Mixed
Uncategorized | jansson | None | rackham irma bianca snowy | 2.12 | MIT
Uncategorized | JasPer | None | bianca irma rackham snowy | 2.0.14 | None
Uncategorized | JsonCpp | JsonCpp | rackham bianca miarka snowy | 1.9.5 | Dual Public Domain/MIT
Uncategorized | kermit | None | rackham irma bianca snowy | 1.0 | MIT
Uncategorized | Kraken_data | None | bianca irma rackham snowy | latest | None
Uncategorized | KrakenUniq_data | KrakenUniq_data | rackham irma bianca miarka snowy | latest | GPLv3, MIT
Uncategorized | KyotoTycoon | None | bianca irma rackham snowy | stable-20170410 | None
Uncategorized | Label-Studio | Label-Studio | rackham bianca miarka snowy | 1.10.1dev | Apache-2.0
Uncategorized | lammps | None | rackham | 29Oct2020_gcc_openmpi, 30Jul2016_intel_mkl, 31Mar2017_gcc_openmpi | None
Uncategorized | libarchive | libarchive | rackham bianca miarka snowy | 3.6.2 | New BSD
Uncategorized | libb2 | libb2 | rackham bianca miarka snowy | 0.98.1 | CC0-1.0
Uncategorized | libdeflate | libdeflate | rackham bianca miarka snowy | 1.19 | MIT
Uncategorized | libicu | None | rackham irma bianca miarka snowy | 5.2-4 | MIT and UCD and public domain
Uncategorized | liblzma | None | milou | 5.2.2 | None
Uncategorized | librdf | librdf | rackham bianca miarka snowy | 1.0.17 | LGPL 2.1, GPL 2, Apache 2
Uncategorized | librsvg | None | rackham irma bianca snowy | 2.48.4 | LGPL 2.1
Uncategorized | libSBML | libSBML | rackham bianca miarka snowy | 5.20.2 | LGPL v2.1
Uncategorized | libtiff | libtiff | rackham bianca miarka snowy | 4.5.0 | AS IS
Uncategorized | libvips | libvips | rackham bianca miarka snowy | 8.15.2 | LGPL-2.1
Uncategorized | lighter | None | bianca irma rackham snowy | 1.1.1 | None
Uncategorized | LLVM | None | rackham irma bianca miarka snowy | 13.0.1 | Various
Uncategorized | LoFreq | None | None | 2.1.2, .lofreq | None
Uncategorized | longranger-data | None | bianca irma rackham snowy | 2.0.0, 2.1.0 | None
Uncategorized | LRSDAY | None | rackham irma bianca snowy | 1.5.0 | MIT
Uncategorized | LTR_retriever | None | rackham irma bianca snowy | 2.9.0 | GPL v3
Uncategorized | lz4 | None | rackham irma bianca snowy | 1.9.2 | BSD 2-Clause & GPLv2
Uncategorized | lzo | None | rackham irma bianca snowy | 2.1 | GPL v2+
Uncategorized | m4 | None | bianca irma milou rackham snowy | 1.4.17 | None
Uncategorized | mathematica | None | rackham snowy | 11.2 | None
Uncategorized | mauve | None | bianca miarka milou rackham snowy | 2015-02-13 | None
Uncategorized | mesos | None | rackham irma bianca snowy | 1.9.0 | Apache-2.0
Uncategorized | metaMATE | None | rackham irma bianca miarka snowy | 20220327-3cdacd7 | Other-d
Uncategorized | metaSNV | None | rackham irma bianca miarka snowy | 1.0.3, 2.0.1 | misc free software
Uncategorized | miniconda2 | None | irma rackham snowy | 4.5.4 | None
Uncategorized | miniconda3 | None | irma rackham snowy | 4.5.4 | None
Uncategorized | miniforge | miniforge | rackham bianca miarka snowy | latest | None
Uncategorized | mlst | None | bianca miarka rackham snowy | 2.12 | None
Uncategorized | MMseqs2_data | MMseqs2_data | rackham bianca miarka snowy | 20230125, 20230816, 20240202 | None
Uncategorized | Moses | Moses | rackham bianca miarka snowy | 4.0 | LGPL v2.1
Uncategorized | mp-tools | mp-tools | rackham bianca miarka snowy | latest | None
Uncategorized | MPFR | MPFR | rackham bianca miarka snowy | 4.2.1 | LGPL v3+
Uncategorized | mpjexpress | None | milou | .mpjexpress, v0.38 | None
Uncategorized | MultiBUGS | None | rackham miarka bianca snowy | 2.0 | LGPL v3.0
Uncategorized | MUMPS | MUMPS | rackham | 5.5.0, 5.5.0-hybrid | CeCILL-C license
Uncategorized | nanopolish | None | rackham irma bianca snowy | 0.12.0 | MIT
Uncategorized | ncbi_taxonomy | None | rackham irma bianca miarka snowy | latest | None
Uncategorized | ncftp | None | bianca irma rackham snowy | 3.2.6 | None
Uncategorized | ncview | None | rackham snowy | 2.1.7, 2.1.7-intel-2019b | GPL 3 license
Uncategorized | ngsF-HMM | None | milou | 20160614 | None
Uncategorized | NINJA | None | rackham irma bianca snowy | 0.97-cluster_only | MIT
Uncategorized | NMRPipe | None | rackham irma bianca snowy | 10.4 | https://www.ibbr.umd.edu/nmrpipe/terms.html
Uncategorized | nullarbor | None | bianca miarka rackham snowy | 2.0.20180819 | None
Uncategorized | octave | None | bianca irma rackham snowy | 5.1.0 | None
Uncategorized | opam | opam | rackham irma bianca miarka snowy | 2.1.2 | LGPL 2.1 with special modifications
Uncategorized | OpenFOAM | None | rackham irma bianca snowy | 6, 7, v1912 | GNU GPLv3
Uncategorized | OpenJPEG | OpenJPEG | rackham bianca miarka snowy | 2.3.0, 2.5.0 | BSD 2-clause
Uncategorized | openslide | openslide | rackham bianca miarka snowy | 4.0.0 | LGPL
Uncategorized | ORCA | None | None | 4.0.1.2, 4.2.1, 5.0.4, 6.0.0, 6.0.1, .4.0.1.2 | None
Uncategorized | panther | panther | rackham bianca miarka snowy | 14.1, 15.0, 17.0, 18.0 | Custom
Uncategorized | paraview | None | None | 5.6, 5.9.1, .paraview_noQT, .paraview_noQT~ | None
Uncategorized | PCAP | None | milou | PCAP | None
Uncategorized | pcre2 | pcre2 | rackham bianca miarka snowy | 10.42 | PCRE2 Licence
Uncategorized | PGAP | None | rackham irma bianca miarka snowy | 2022-02-10.build5872 | Mixed
Uncategorized | PHP | PHP | rackham bianca miarka snowy | 7.4.32 | PHP License v.3.0.1
Uncategorized | PhylogicNDT | None | rackham irma bianca miarka snowy | 2020 | None
Uncategorized | phylophlan | None | bianca irma milou rackham snowy | 0.99 | None
Uncategorized | pigz | pigz | rackham bianca miarka snowy | 2.4, 2.8 | Custom AS IS
Uncategorized | plumed | None | rackham irma bianca snowy | 2.6.0, 2.7.1-gcc | LGPL-3.0
Uncategorized | pm-tools | pm-tools | rackham bianca miarka snowy | latest | None
Uncategorized | python_GIS_packages | python_GIS_packages | rackham bianca miarka snowy | 3.10.8 | Various
Uncategorized | python_ML_packages | python_ML_packages | snowy | 3.9.5-cpu, 3.9.5-gpu, 3.11.8-cpu, 3.11.8-gpu | Various
Uncategorized | PyTorch | PyTorch | rackham bianca miarka snowy | 1.12.0-cpu | BSD-modified
Uncategorized | qctool | None | rackham bianca irma snowy | 2-5559450, 2-beta | None
Uncategorized | QoRTs | None | rackham irma bianca snowy | 1.3.6 | MIT
Uncategorized | QuantumESPRESSO | None | rackham | 6.7MaX-Release | GNU GPLv2
Uncategorized | rawcopy | None | rackham irma bianca snowy | 1.1 | None
Uncategorized | RDKit | RDKit | rackham irma bianca miarka snowy | 20220301 | BSD 3-Clause-d
Uncategorized | readline | None | rackham irma bianca miarka snowy | 6.2-11 | GPL v3
Uncategorized | rfmix | None | bianca irma rackham snowy | v2 | None
Uncategorized | Rosetta | None | rackham irma bianca snowy | 3.7, 2019.4 | https://els.comotion.uw.edu/licenses/86
Uncategorized | RSPt | None | rackham | 2019-08-30, 2020-06-10 | GPL2 license
Uncategorized | rspt | None | None | 1951_intel_mkl, 2003_intel_mkl, 2018-03-31_intel_mkl, rspt.tcl | None
Uncategorized | rust | rust | rackham bianca miarka snowy | 1.43.1, 1.67.0, 1.77.0 | Apache 2.0, MIT
Uncategorized | sbcl | None | None | 1.0.12, .sbcl | None
Uncategorized | scikit-learn | None | rackham irma bianca snowy | 0.22.1 | BSD 3
Uncategorized | sda-cli | sda-cli | rackham bianca miarka snowy | 0.1.0, 0.1.3 | AGPL-3.0
Uncategorized | SDSL | None | rackham irma bianca miarka snowy | 2.1.1 | GPLv3
Uncategorized | Siesta | None | rackham irma bianca snowy | 4.1-b4, 4.1-MaX-1.0 | GNU GPLv3
Uncategorized | siesta | None | None | 3.2-pl4_intel_mkl, 4.1-b3_intel_mkl, siesta-2018.tcl, siesta.tcl | None
Uncategorized | silva | None | bianca irma rackham snowy | 128, 132, 138.1 | None
Uncategorized | simpleitk | None | rackham irma bianca snowy | 1.2.4 | Apache 2.0
Uncategorized | Singular | None | rackham irma bianca snowy | 4.1.2 | GNU GPL
Uncategorized | smoove | None | rackham irma bianca snowy | 0.2.5 | Apache 2.0
Uncategorized | snp-sites | None | bianca irma rackham snowy | 2.4.0 | None
Uncategorized | snpEff_data | snpEff_data | rackham bianca miarka snowy | 5.1, 5.2 | MIT
Uncategorized | SourceTracker | None | bianca miarka rackham snowy | 0.9.5 | None
Uncategorized | spaceranger-data | None | bianca irma rackham snowy | 1.0.0, 2020-A | None
Uncategorized | spm | None | bianca irma rackham | 12 | None
Uncategorized | SPRKKR | None | rackham snowy | 7.7.1, 7.7.3 | GPL license
Uncategorized | SQuIRE | None | rackham irma bianca miarka snowy | 0.9.9.9a-beta, 885bf4d-20190301 | GPL v3
Uncategorized | SRPRISM | None | rackham irma bianca miarka snowy | 3.2.7 | Public domain
Uncategorized | SuiteSparse | SuiteSparse | rackham bianca miarka snowy | 5.8.1, 7.7.0 | Multiple
Uncategorized | sumaclust | None | bianca irma rackham snowy | 1.0.00 | None
Uncategorized | sunstudio | None | None | 12.1, 12.1u1, .sunstudio | None
Uncategorized | SvABA | None | rackham irma bianca snowy | 1.1.3-20201112-0f60e36 | GPL v3
Uncategorized | svviz | None | rackham irma bianca snowy | 1.6.4 | MIT
Uncategorized | swarm | None | bianca irma rackham snowy | 1.2.19 | None
Uncategorized | SymEngine | None | rackham irma bianca snowy | 0.7.0 | MIT
Uncategorized | tapestri | None | rackham irma bianca snowy | 2.0.1 | None
Uncategorized | tcl | None | bianca irma rackham snowy | 8.6.8 | None
Uncategorized | TelomereHunter | None | bianca irma rackham snowy | 1.0.4 | None
Uncategorized | tesseract | None | rackham irma bianca snowy | 4.1.3 | Apache-2.0
Uncategorized | TEtranscripts | None | rackham irma bianca snowy | 2.2.1 | GPL3
Uncategorized | totalview | None | None | 8.8.0-2, 8.9.2-0, 8.9.2-2, 8.10.0-1, 8.15.0-15, totalview | None
Uncategorized | turbo | None | None | 6.00, 6.01, 6.1, 6.02, .turbo | None
Uncategorized | turbomole | None | bianca irma milou rackham | 7.3 | None
Uncategorized | umap | None | bianca irma rackham snowy | 0.3.7 | None
Uncategorized | utf8proc | None | rackham irma bianca snowy | 2.5.0 | MIT
Uncategorized | Vampire | Vampire | rackham snowy | 6.0 | GPL
Uncategorized | vasp | None | None | 4.6.34_intel_mkl, 4.6_intel_mkl, 5.2.8_intel_mkl, 5.2.11_intel_mkl, 5.2.12_intel_mkl, 5.2_intel_mkl, 5.3.3_intel_mkl, 5.4.1_intel_mkl, 5.4.1_openmpi_intelmkl, 5.4.4_intel_mkl, .vasp_acml, vasp, vasp.save, vasp.tcl, vasp.tcl_2013, vasp.tcl_2014, vasp.tcl_2017, vasp.tcl_2017_intelmpi, vasp.tcl_2017_openmpi, vasp_new | None
Uncategorized | vasp_acml | None | None | 4.6.28, 4.6.34, 4.6.36, .vasp_acml | None
Uncategorized | vasp_goto | None | None | 4.6.28, 4.6.34, 4.6.36, .vasp_goto | None
Uncategorized | VMD | VMD | rackham bianca miarka snowy | 1.9.4-alpha-57 | University of Illinois Open Source License
Uncategorized | VSCodium | VSCodium | rackham bianca miarka snowy | latest | MIT
Uncategorized | vtk | None | None | 6.1.0, .vtk | None
Uncategorized | Whisper | Whisper | rackham bianca miarka snowy | 0.5.1, 20240930 | None
Uncategorized | Whisper-gui | Whisper-gui | rackham bianca miarka snowy | 0.1, 0.2, 0.3, 0.3.1 | None
Uncategorized | Whispercpp | Whispercpp | rackham bianca miarka snowy | 1.7.3 | None
Uncategorized | WhisperX | WhisperX | rackham bianca miarka snowy | 3.1.1 | None
Uncategorized | wine | wine | rackham bianca miarka snowy | 7.0 | None
Uncategorized | WPS | None | rackham snowy | 4.1 | None
Uncategorized | WPS-geog | None | rackham snowy | 4 | Custom open-source AS IS
Uncategorized | WRF | None | bianca irma rackham snowy | 4.1.3, 4.1.3-dmpar | None
Uncategorized | WRFg | None | rackham snowy | 3.8.1 | Custom AS-IS
Uncategorized | XCrySDen | None | rackham irma bianca snowy | 1.5.60, 1.6.2 | GNU GPLv2
Uncategorized | zig | zig | rackham bianca miarka snowy | 0.9.1, 0.11.0 | MIT
Uncategorized | zsh | None | bianca irma rackham snowy | 5.7.1 | None
Uncategorized | zstd | zstd | rackham irma bianca miarka snowy | 1.5.2 | GPL v2
diff --git a/software/software_table.html b/software/software_table.html new file mode 100644 index 000000000..491123de6
module | name | category | cluster | license | keywords | versions | id
454-dataanalysis454-dataanalysis (Roche Newbler assembler)Bioinformatics SW CollectionsRackham, Bianca, Snowyassembler2.3, 2.53, 2.6, 2.9454-dataanalysis
AATAATBioinformatics AnnotationRackham, Bianca, SnowyCustom "AS IS"annotationr03052011AAT
AGEAGEBioinformatics AlignmentRackham, Bianca, Snowyalignment0.4AGE
AMOSAMOSBioinformatics AssemblyMilou, Rackham, BiancaArtisticassembly3.0.0, 3.1.0AMOS
ANGSDANGSDBioinformatics MiscRackham, Bianca, SnowyOpen sourcesequencing, gene0.917, 0.917-g6522d3e, 0.921, 0.933ANGSD
ARCSARCSBioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly1.0.6, 1.1.1ARCS
ARPACK-NGARPACK-NGLibrariesMilou, Rackham, BiancaBSD Software Licenseeigenvalue3.8.0ARPACK-NG
ArrowGridArrowGridBioinformatics AssemblyRackham, Bianca, SnowyPublic Domainparallel, wrapper, consensus, assembly20191022-9759eb1ArrowGrid
BOLT-LMMBOLT-LMMBioinformatics MiscRackham, Bianca, SnowyGPL v3model, genetic, gene, association, variance, heritability, correlation2.3.5BOLT-LMM
BamsurgeonBamsurgeonBioinformatics MiscRackham, Bianca, SnowyMIT Licensegeo, mutation1.3Bamsurgeon
BioBakeryBioBakery\n (Metapackage that includes MetaPhlAn, PhyloPhlan, KneadData and HUMAnN)Bioinformatics MiscRackham, Bianca, SnowyMITmicrobial, profiling, community, micro3.0BioBakery
CDOCDOGeospatialRackham, Bianca, SnowyGPL v2spatial1.9.5, 1.9.7.1, 1.9.7.1-intel18.3CDO
COIN-OR-OptimizationSuiteCOIN-OR Optimization SuiteMisc ApplicationsRackham, Bianca, Snowysolver1.8.0COIN-OR-OptimizationSuite
CONSELCONSELBioinformatics PhylogenyMilou, Rackham, Biancaphylogeny0.20CONSEL
CheckMCheckMBioinformatics MiscRackham, Bianca, SnowyGPL v3microbial, cell, metagenome, micro, genome, quality1.0.11, 1.0.12, 1.1.3CheckM
ConterminatorConterminatorBioinformatics MiscRackham, Bianca, Snowykingdom20200601-570993bConterminator
CromwellCromwellMisc ApplicationsRackham, Bianca, SnowyBSD 3-Clausebioinformatics71Cromwell
DeepVariantDeepVariantBioinformatics PipelinesRackham, Bianca, SnowyBSD-3pipeline0.8.0DeepVariant
DelGetDelGetBioinformatics MiscRackham, Bianca, Snowyorthologous, micro, region, species, gaps5.4DelGet
EDirectEDirectBioinformatics MiscRackham, Bianca, SnowyPublic Domainvariation, structure, database, terminal, gene15.1EDirect
ExaBayesExaBayesBioinformatics PhylogenyRackham, Bianca, SnowyGPL v3phylogeny1.5, 1.5-mpiExaBayes
FALCONFALCONBioinformatics AssemblyRackham, Bianca, SnowyClear BSDassembly0.3.0, 0.4.1, 2018.31.08-03.06FALCON
FALCON-integrateFALCON-integrateBioinformatics AssemblyRackham, Bianca, SnowyCustom "as is"assembly20161113FALCON-integrate
FFmpegFFmpegMisc ApplicationsRackham, Bianca, SnowyLGPL v2.1+ with non-free featurescross-platform, audio4.4FFmpeg
FYBAOpenFYBA / SOSIGeospatialRackham, Bianca, SnowyCustom open-source "as is"spatial4.1.1FYBA
FastMEFastMEBioinformatics PhylogenyRackham, Bianca, SnowyGPL v3distance-based, inference, phylogeny2.1.6.2FastME
FastMLFastMLBioinformatics PhylogenyRackham, Bianca, SnowyGPL v2+phylogeny3.1, 3.11FastML
FastQCFastQCBioinformatics MiscRackham, Bianca, SnowyGPLquality0.11.2, 0.11.5, 0.11.8, 0.11.9FastQC
FastTreeFastTreeBioinformatics PhylogenyMilou, Rackham, BiancaOpen source "as is"phylogeny2.1.8, 2.1.10FastTree
FlyeFlyeBioinformatics AssemblyRackham, Bianca, SnowyBSD 3-clauseassembly2.3.5, 2.4.2, 2.9Flye
GAAGAABioinformatics AssemblyRackham, Bianca, SnowyGPL v2+assembly1.1GAA
GARMGARMBioinformatics AssemblyRackham, Biancaassembly0.7.3GARM
GDALGDALGeospatialRackham, Bianca, SnowyX/MIT style Open Sourcespatial2.1.0, 3.1.0GDAL
GEM-ToolsGEM-ToolsBioinformatics AlignmentRackham, Bianca, SnowyGPL (GEM-Tools) and custom non-commercial (GEM library)alignment1.7.1GEM-Tools
GEMINIGEMINIBioinformatics AnnotationRackham, Bianca, SnowyMITannotation0.18.3, 0.19.0, 0.20.0, 0.20.1GEMINI
GEOSGEOSGeospatialRackham, Bianca, SnowyLGPL v2.1spatial3.5.0, 3.9.1-gcc9.3.0GEOS
GMPGMPLibrariesRackham, Bianca, SnowyLGPL v3; GPL v2library, precision, floating-point, integer, rational6.2.1GMP
GOTMGOTMGeospatialRackham, Bianca, SnowyGPL v2spatial5.3-221-gac7ec88dGOTM
GUSHRGUSHRBioinformatics AnnotationRackham, Bianca, SnowyGPL v3annotation, coding1.0.0GUSHR
GeneMarkGeneMarkBioinformatics MiscRackham, Bianca, SnowyGeneMark licenceannotation2.3-es, 4.32-es, 4.33-es, 4.33-es_Perl5.24.1, 4.38-es, 4.57-es, 4.62-es, 4.68-esGeneMark
GenomeThreaderGenomeThreaderBioinformatics AnnotationRackham, Bianca, SnowyCustom "AS IS"annotation1.7.0, 1.7.1GenomeThreader
GhostPDLGhostscript, GhostPCL, GhostXPS, GhostPDLMisc ApplicationsRackham, Bianca, SnowyGnu GPL Affero v3language, interpreter9.53.3GhostPDL
HESSHESSBioinformatics MiscRackham, Bianca, SnowyGPL v3genetic, local, covariance, gene, association, variance, heritability, correlation0.5.4-betaHESS
HISAT2HISAT2Bioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment2.1.0, 2.2.1HISAT2
HOMERHOMERBioinformatics AnnotationRackham, Bianca, SnowyGPL v3annotation4.10, 4.11HOMER
HUMAnNHUMAnNBioinformatics MiscRackham, Bianca, SnowyMITmicrobial, sequencing, transcript, micro, metatranscriptomic, profiling, molecular, metagenomic, metabolic, genomic, abundance, pathway3.0HUMAnN
HiC-ProHiC-ProBioinformatics PipelinesRackham, Bianca, SnowyBSD-3pipeline2.11.1HiC-Pro
HyPhyHyPhyBioinformatics PhylogenyRackham, Bianca, SnowyCustom open-source "as is"phylogeny2.5.0, 2.5.0-mpiHyPhy
ImageMagickImageMagickMisc ApplicationsRackham, Bianca, SnowyDerived Apache 2.0images6.9.9-35, 7.0.11-3ImageMagick
InterProScanInterProScanBioinformatics AnnotationRackham, Bianca, SnowyMiscannotation5.30-69.0, 5.52-86.0InterProScan
KalignKalignBioinformatics AlignmentRackham, Bianca, SnowyGPL v2alignment, multi1.04, 2.04Kalign
KneadDataKneadDataBioinformatics MiscSnowy, Rackham, BiancaMITmicrobiome, sequencing, transcript, micro, metatranscriptomic, quality, metagenomic, genomic0.10.0KneadData
LEON-BISLEON-BISBioinformatics AlignmentMilou, Rackham, Biancaalignment20130322LEON-BIS
LINKSLINKSBioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly1.8.7LINKS
LoRDECLoRDECBioinformatics AssemblyRackham, Bianca, SnowyCeCILL 2.1assembly0.9LoRDEC
MAFFTMAFFTBioinformatics AlignmentRackham, Bianca, SnowyBSD (main), mixed open-source (extensions)alignment7.205, 7.245, 7.310, 7.407MAFFT
MEGANMEGANBioinformatics PhylogenyMilou, Rackham, BiancaPersonal academicphylogeny4.70.4, 5.1.5, 5.11.3, 6.3.5, 6.10.5, 6.20.17MEGAN
MPCMPCLibrariesRackham, Snowy, BiancaLGPL v3+library, complex, precision1.2.1MPC
MPFRMPFRLibrariesRackham, Bianca, SnowyLGPL v3+library, multi, precision, floating-point4.1.0MPFR
MUMmerMUMmerBioinformatics AlignmentRackham, Bianca, SnowyArtistic License 2.0alignment3.22, 3.23, 3.9.4alpha, 4.0.0beta2, 4.0.0rc1MUMmer
MaSuRCAMaSuRCABioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly3.2.2, 3.2.3, 3.3.5, 3.4.2, 4.0.2MaSuRCA
MafFilterMafFilterBioinformatics AlignmentMilou, Rackham, Biancaalignment1.1.2MafFilter
MagicsMagicsGeospatialRackham, Bianca, SnowyApache 2.0spatial3.3.1, 3.3.1-intel18.3Magics
MetAMOSMetAMOSBioinformatics AssemblyMilouGPLv2 and other open sourceassembly1.5rc3MetAMOS
MetaPhlAn3MetaPhlAn3Bioinformatics PhylogenyRackham, Bianca, SnowyMIT licensemicrobial, composition, sequencing, communities, micro, phylogeny, profiling, species, metagenomic, shotgun, genomic, computational3.0.8MetaPhlAn3
MultiQCMultiQCBioinformatics MiscRackham, Bianca, SnowyMITbioinformatics0.6, 0.7, 0.8, 0.9, 1.0, 1.2, 1.3, 1.5, 1.6, 1.7, 1.8, 1.9, 1.10, 1.10.1, 1.11MultiQC
NCONCOGeospatialRackham, Bianca, SnowyBSDspatial4.8.1, 4.9.2, 4.9.3NCO
NPStatNPStatBioinformatics MiscRackham, Bianca, SnowyGPL version 3genetic, gene, pool1NPStat
NanoPlotNanoPlotBioinformatics MiscRackham, Bianca, SnowyGPL v3alignment, sequencing1.33.1NanoPlot
NeEstimatorNeEstimatorBioinformatics MiscRackham, Bianca, SnowyAS ISgenetic, genotype, gene, population2.1NeEstimator
NeuSomaticNeuSomaticBioinformatics MiscRackham, Bianca, SnowyCreative Commons Public Licenseconvolutional, neural, networks, network, somatic, mutation0.2.1NeuSomatic
NextDenovoNextDenovoBioinformatics AssemblyRackham, Bianca, SnowyGNUstring, assembler, graph, assembly, reads2.4.0, 2.5.0NextDenovo
NextGenMapNextGenMapBioinformatics AlignmentMilou, Rackham, BiancaArtistic License 1.0alignment0.4.12, 0.5.4NextGenMap
NextflowNextflowBioinformatics AlignmentMilou, Rackham, BiancaGPLv3pipeline0.17.3, 0.22.2, 0.26.0Nextflow
NextflowNextflowBioinformatics PipelinesRackham, Bianca, Snowypipeline18.10.1Nextflow
NgsRelateNgsRelateBioinformatics MiscSnowy, Rackham, BiancaGPL v2statistics, genotype, coverage, relatedness, inbreeding, likelihood2.0NgsRelate
OMBlastOMBlastBioinformatics AlignmentMilou, Rackham, Biancaunknownalignment1.0OMBlast
OpenBUGSOpenBUGSMisc ApplicationsRackham, Bianca, SnowyGPL v2inference3.2.3OpenBUGS
PBSuitePBSuite (contains PBJelly and PBHoney)Bioinformatics AssemblyMilou, Rackham, BiancaCustom redistributable "as is"assembly15.8.24PBSuite
PLplotPLplotLibrariesRackham, Bianca, SnowyLGPL v2plot5.15.0, 5.15.0-old_wxWidgetsPLplot
PRISMS-PFPRISMS-PFChemistry/PhysicsRackham, SnowyLesser GPLmicrostructural, simulation, parallel, structural, micro, phase, evolution, finite2.1.1PRISMS-PF
PROJPROJGeospatialRackham, Bianca, SnowyMITspatial6.3.2PROJ
PROJ.4PROJ.4GeospatialRackham, Bianca, SnowyMITspatial4.9.2, 4.9.3, 4.9.3-intel18.3PROJ.4
PhobiusPhobiusBioinformatics AnnotationRackham, Bianca, SnowyPrivate study, education and non-profit research onlyannotation1.01Phobius
PhyMLPhyMLBioinformatics PhylogenyRackham, Bianca, SnowyGPL v3phylogeny3.1, 3.3.20190321PhyML
PilonPilonBioinformatics AssemblyMilou, Snowy, Rackham, BiancaGPL v2assembly1.17, 1.22, 1.24Pilon
Platanus-alleePlatanus-alleeBioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly2.0.2Platanus-allee
ProbConsProbConsBioinformatics AlignmentMilou, Rackham, BiancaPublic domainalignment1.12ProbCons
ProtHintProtHintBioinformatics AnnotationRackham, Bianca, SnowyGeneMark licenseannotation2.4.0, 2.5.0, 2.6.0ProtHint
PsNPerl-speaks-NONMEMStatisticsRackham, Bianca, SnowyGPL v2+peak5.0.0PsN
QGISQGISGeospatialRackham, Bianca, SnowyGPLgeospatial, geo, spatial3.4.12QGIS
QualiMapQualiMapBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment2.0.2, 2.2QualiMap
RRStatisticsRackham, Bianca, SnowyGPLgraphics, statistical, graph, environment3.0.2, 3.2.3, 3.3.2, 3.4.0, 3.4.3, 3.5.0, 3.5.2, 3.6.0, 3.6.1, 4.0.0, 4.0.4, 4.1.1R
RAisDRAisDBioinformatics MiscRackham, Bianca, SnowyGPL v2polymorphism, nucleotide, multi, sweep2.9RAisD
RAxML-NGRAxML-NGBioinformatics PhylogenyRackham, Bianca, SnowyGNU Affero GPL v3.0phylogeny0.2.0b, 0.2.0b-mpi, 0.7.0b, 0.7.0b-mpi, 0.9.0, 0.9.0-mpiRAxML-NG
REAPRREAPRBioinformatics AssemblyMilou, Rackham, BiancaGPLassembly1.0.17, 1.0.18REAPR
RStudioRStudioStatisticsRackham, Bianca, SnowyAGPLlanguage, development, environment1.0.136, 1.0.143, 1.0.153, 1.1.423, 1.1.463, 1.4.1106RStudio
RaconRaconBioinformatics AssemblyRackham, Bianca, SnowyMITgenome, reads, consensus, assembly1.4.21Racon
RagoutRagoutBioinformatics AssemblyMilou, Rackham, BiancaGPL v3.0assembly2.0-20170312Ragout
RayRayBioinformatics AssemblyMilou, Rackham, BiancaGPLassembly0.0.4, 0.0.7, 1.6.1, 2.3.0, 2.3.1, 2.3.1-mpiio (with support for MPI I/O, but this seems not to work very well)Ray
RcorrectorRcorrectorBioinformatics MiscRackham, Bianca, SnowyGPL v3kmer, correction1.0.4Rcorrector
RedundansRedundansBioinformatics AssemblyRackham, Bianca, SnowyGPL v2assembly0.13a-20170321, 0.14a-20190314-de_novo-42e8edf (development only), 0.14a-20190313-d11d32b, 0.14a-20190509-ffae69eRedundans
RepeatMaskerRepeatMaskerBioinformatics MiscRackham, Bianca, Snowyopen sourceannotation4.0.7, 4.0.7_Perl5.24.1, 4.0.8, 4.1.0RepeatMasker
RoaryRoaryBioinformatics MiscRackham, Bianca, SnowyGPL v3assemblies, pipeline, genome3.6.2, 3.12.0, 3.13.0Roary
SACRASACRABioinformatics MiscRackham, Bianca, SnowyMIT 3virome, reads, chimeric1.0.0SACRA
SAIGESAIGEMisc ApplicationsRackham, Bianca, SnowyGPL v3biobanks, model, genetic, relatedness, gene, association, cohort0.42.1, 0.44.6.5SAIGE
SGASGABioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly0.10.14SGA
SHAPEITSHAPEITBioinformatics MiscRackham, Bianca, SnowyCustom non-commercialphasing, plot, multi, haplotype, sequencingv2.r837, v2.r904, v3.r884.2, v4.1.3SHAPEIT
SHAPELIBSHAPELIBGeospatialRackham, Bianca, SnowyLGPL v2spatial1.5.0SHAPELIB
SLiMSLiMBioinformatics MiscRackham, Bianca, SnowyGPL v3model, simulation, genetic, modeling, evolution, gene, evolutionary, population, complex2.6, 3.2.1, 3.3.2, 3.6SLiM
SMARTdenovoSMARTdenovoBioinformatics AssemblyRackham, Bianca, SnowyMisc open sourceassembly20180219-5cc1356, 20210224-8488de9SMARTdenovo
SMC++SMC++Bioinformatics MiscRackham, Bianca, SnowyGPL v3genome, population1.15.2SMC++
SMRTSMRT Link, SMRT AnalysisBioinformatics PipelinesRackham, Bianca, Snowypipeline2.2.0, 2.3.0, 5.0.1, 7.0.1SMRT
SNePSNePBioinformatics MiscRackham, Snowy, BiancaAS ISphasing, recombination, genome, mutation, population, genome-wide1.1SNeP
SalmonSalmonBioinformatics MiscRackham, Bianca, SnowyGPL v3+transcript, quantification0.8.2, 0.9.1, 0.11.2, 1.1.0, 1.4.0Salmon
SalmonTESalmonTEBioinformatics MiscRackham, Bianca, Snowypipeline20180926SalmonTE
ScipioScipioBioinformatics AnnotationRackham, Bianca, SnowyCustom non-commercialannotation1.4.1Scipio
SeqKitSeqKitBioinformatics MiscRackham, Bianca, SnowyMIT Licensecross-platform0.15.0SeqKit
ShastaShastaBioinformatics AssemblyRackham, Bianca, SnowyMITcell, assembler, assembly, gene, reads0.7.0Shasta
SiBELiaSiBELiaBioinformatics AlignmentRackham, Bianca, Snowyalignment3.0.6, 3.0.7SiBELia
SignalPSignalPBioinformatics AnnotationRackham, Bianca, SnowyCustom academic user licenceannotation4.1c, 4.1f, 5.0bSignalP
SnifflesSnifflesBioinformatics MiscRackham, Snowy, BiancaMIT licensevariation, structural, sequencing, gene1.0.10, 1.0.12-201218-4ff6ecbSniffles
SortMeRNASortMeRNA (original site)Bioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment, site2.0, 2.1b, 3.0.3SortMeRNA
StampyStampyBioinformatics AlignmentRackham, Bianca, SnowyCustom (non-commercial)alignment1.0.27, 1.0.32Stampy
StringTieStringTieBioinformatics AssemblyRackham, Bianca, SnowyMITassembly1.2.0, 1.3.3, 2.1.4StringTie
SuperLUSuperLULibrariesRackham, Bianca, SnowyCustomsparse, linear5.2.1SuperLU
T-CoffeeT-CoffeeBioinformatics AlignmentRackham, Bianca, SnowyGPL v2alignment11.00.8cbe486, 13.39.0.d675aedT-Coffee
TSEBRATSEBRABioinformatics AnnotationRackham, Bianca, SnowyArtistic License 2.0annotation, transcript, codons, prediction, gene, introns1.0.2-20210716-1f167adTSEBRA
TargetPTargetPBioinformatics AnnotationRackham, Bianca, SnowyCustom academic 'AS IS'cleavage, annotation, chloroplast, mitochondrial, thylakoid, site, terminal, luminal, signal, peptide2.0TargetP
Tcl-TkTcl-TkMisc ApplicationsRackham, Bianca, SnowyBSD-likegraphical, table, programming, graph, language, network8.6.11Tcl-Tk
TransDecoderTransDecoderBioinformatics AnnotationRackham, Bianca, SnowyCustom open-source 'AS IS'annotation5.0.1, 5.3.0, 5.5.0TransDecoder
UnicyclerUnicyclerBioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly0.4.8Unicycler
VIBRANTVIBRANTBioinformatics AnnotationRackham, Bianca, SnowyGNU General Public Licenseannotation1.2.1VIBRANT
VaporVaporGeospatialRackham, SnowyApache v2.0 licensevisualization, spatial, images, search, environment3.2.0, 3.3.0Vapor
VmatchVmatchBioinformatics AlignmentRackham, Bianca, SnowyCustom non-commercialalignment2.2.5, 2.3.0Vmatch
YleafYleafBioinformatics PhylogenyRackham, Bianca, Snowyphylogeny20180326, 2.0Yleaf
abyssABySSBioinformatics AssemblyMilou, Rackham, BiancaGPL v3assembly1.2.3, 1.2.4, 1.2.5, 1.2.7 1.2.7-maxk96, 1.3.0, 1.3.2, 1.3.3, 1.3.3-k96, 1.3.4, 1.3.5, 1.3.5-max, 1.3.7, 1.3.7-max, 1.3.7-k128, 1.5.2, 1.9.0, 1.9.0-k128, 2.0.2, 2.0.2-k128abyss
allpathslgAllPathsLGBioinformatics AssemblyMilouassembly47300, 49618, 52485allpathslg
alphafoldAlphafoldMisc ApplicationsRackham, Bianca, SnowyApache Licensepipeline, inference2.0.0alphafold
anfoANFOBioinformatics AlignmentRackham, Bianca, SnowyGPLalignment0.98anfo
annovarANNOVARBioinformatics SW CollectionsRackham, Bianca, SnowyStrictly non-commercial usageannotation2014.11.12, 2015.06.17, 2016.05.11, 2017.07.16, 2018.04.16annovar
aragornARAGORNBioinformatics AlignmentRackham, Bianca, Snowyalignment1.2.36, 1.2.38aragorn
augustusAugustusBioinformatics AnnotationRackham, Bianca, SnowyArtistic Licenseannotation3.2.2, 3.2.3, 3.2.3_Perl5.24.1, 3.3.3, 3.3.3-CGPaugustus
barrnapBarrnapBioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment, rrna, rna0.2, 0.6, 0.8, 0.9barrnap
bcftoolsBCFtoolsBioinformatics MiscRackham, Bianca, SnowyMIT/Expat or GPL v3variant1.2, 1.3.1, 1.5, 1.6, 1.8, 1.9, 1.9-develop, 1.10, 1.12, 1.14bcftools
bcl2fastqbcl2fastqBioinformatics SW CollectionsRackham, Bianca, Snowyfastq1.8.4, 2.15.0, 2.16.0, 2.17.1, 2.20.0bcl2fastq
beaglebeagle-libBioinformatics PhylogenyRackham, Bianca, SnowyGPLphylogeny2.1.2, 3.1.2beagle
beastBEASTBioinformatics PhylogenyMilou, Rackham, BiancaGPLphylogeny1.7.5, 1.8.1, 1.8.4beast
beast2BEAST2Bioinformatics PhylogenyRackham, Bianca, SnowyLGPL v2.1phylogeny2.1.2, 2.3.1, 2.4.0, 2.4.8, 2.6.2, 2.6.3beast2
binutilsGnu binutilsCompilers and build toolsRackham, Bianca, SnowyGPL v2, v3; LGPL v2, v3compiler2.26, 2.28, 2.30binutils
biopythonBioPythonBioinformatics MiscRackham, Bianca, SnowyBiopythonpython1.68 (loads python 2.7.6), 1.68-py3 (loads python/3.4.3); 1.73 (python/2.7.15), 1.73-py3 (python/3.6.0), 1.76-py3 (python/3.7.2)biopython
bisonbisonCompilers and build toolsRackham, Bianca, SnowyGPL v3+grammar, compiler, table, context-free, gene, deterministic, parser3.7.6bison
blasrBlasr and associated utilitiesBioinformatics AlignmentRackham, Bianca, SnowyOpen source redistributable "as is"alignment20150922, 5.3-20161121-e901e48, 5.3-20171117-f72428d, 5.3.2blasr
blastBLASTBioinformatics AlignmentRackham, Bianca, SnowyPublic Domainalignment2.2.23+, 2.2.24+, 2.2.25+, 2.2.27+, 2.2.28+, 2.2.29+, 2.2.31+, 2.4.0+, 2.5.0+, 2.6.0+, 2.7.1+, 2.9.0+, 2.10.1+, 2.11.0+blast
blastBLAST (legacy)Bioinformatics AlignmentRackham, Bianca, SnowyPublic Domainalignment2.2.15, 2.2.18, 2.2.22, 2.2.23, 2.2.24, 2.2.25, 2.2.26blast
blatBLATBioinformatics AlignmentRackham, Bianca, SnowyOpen-source and strictly non-commercialalignment34, 35, 36blat
bowtieBowtieBioinformatics PipelinesRackham, Bianca, SnowyArtisticalignment0.12.9, 1.1.2, 1.2.0, 1.2.2, 1.2.3bowtie
bowtie2Bowtie2Bioinformatics PipelinesRackham, Bianca, SnowyGPL v3alignment2.1.0, 2.2.9, 2.3.2, 2.3.3.1, 2.3.4.1, 2.3.4.3, 2.3.5.1bowtie2
bpipebpipeBioinformatics PipelinesRackham, Bianca, SnowyBSDpipeline0.9.8.6, 0.9.8.7bpipe
brakerBRAKERBioinformatics AnnotationRackham, Bianca, SnowyArtistic Licenseannotation2.1.1_Perl5.24.1, 2.1.5, 2.1.5-20200826-cf958ec, 2.1.5-20210115-e98b812, 2.1.6braker
bwaBWABioinformatics AlignmentRackham, Bianca, SnowyGPLalignment0.5.8a, 0.5.9, 0.6.1, 0.6.2, 0.7.4, 0.7.8, 0.7.10, 0.7.12, 0.7.13, 0.7.15, 0.7.17bwa
bwa-methbwa-methBioinformatics AlignmentRackham, Bianca, SnowyMITalignment0.2.2bwa-meth
bwakitbwa.kitBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment0.7.12bwakit
cactuscactusBioinformatics AlignmentRackham, Bianca, SnowyCustom open-source AS ISalignment1.0.0, 1.2.3cactus
cairoCairoLibrariesRackham, Bianca, SnowyGNU LGPL v2.1 or Mozilla Public Licence 1.1library, multi, graphics, graph1.14.8, 1.14.12, 1.17.2, 1.17.4cairo
canuCanuBioinformatics AssemblyRackham, Bianca, SnowyGPL v2 and others for subcomponentsassembly1.5, 1.6, 1.7, 1.7-86da76b, 1.8, 2.0, 2.2canu
cdbfastacdbfastaBioinformatics MiscRackham, Bianca, SnowyArtistic 2.0fasta0.99, 1.00cdbfasta
cegmaCEGMABioinformatics AnnotationRackham, Bianca, Snowyannotation2.5cegma
cellrangerCell Ranger (Chromium 10X)Bioinformatics SW CollectionsRackham, Bianca, Snowyothercell1.1.0, 1.3.0, 2.0.2, 3.0.1, 4.0.0, 5.0.1cellranger
cellranger-ARCCell Ranger ARC (Chromium 10X)Bioinformatics SW CollectionsRackham, Bianca, Snowyothercell1.0.0cellranger-ARC
cellranger-ATACCell Ranger ATACBioinformatics MiscRackham, Bianca, SnowyMisc non-commercialcell1.2.0cellranger-ATAC
cellranger-DNACell Ranger DNA (Chromium 10X)Bioinformatics SW CollectionsRackham, Bianca, Snowyothercell1.1.0cellranger-DNA
clustaloClustalOBioinformatics AlignmentMilou, Rackham, Biancaalignment1.2, 1.2.4clustalo
clustalwClustalWBioinformatics AlignmentMilou, Rackham, BiancaLGPLalignment2.1clustalw
clusterflowCluster FlowBioinformatics PipelinesRackham, Bianca, SnowyGPL v3cluster, pipeline0.3, 0.4devel, 0.4clusterflow
cufflinksCufflinksBioinformatics PipelinesRackham, Bianca, SnowyBoost License v1.0pipeline, link2.1.1, 2.2.1, 2.2.1-b55bb21cufflinks
cutadaptcutadaptBioinformatics PipelinesRackham, Bianca, SnowyMITpipeline1.9.1, 1.13, 1.16, 2.0, 2.3, 3.1cutadapt
ddtAllinea DDTCompilers and build toolsRackham, Bianca, SnowyCommercialcompiler7.0ddt
dellyDellyBioinformatics AnnotationRackham, Bianca, SnowyGPL v3annotation0.7.6, 0.7.7delly
diamondDIAMONDBioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment0.7.9, 0.7.12, 0.8.26, 0.9.10, 0.9.24, 0.9.26, 0.9.29, 0.9.31, 2.0.4diamond
ecCodesecCodesGeospatialRackham, Bianca, Snowyspatial2.13.1ecCodes
eggNOG-mapperEggNOG mapper and EggNOG databases 4.5.1Bioinformatics AnnotationRackham, Bianca, SnowyGPL v2annotation, database1.0.3eggNOG-mapper
exonerateExonerateBioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment2.2.0, 2.4.0exonerate
fastaFASTABioinformatics AlignmentRackham, Bianca, SnowyApache 2.0alignment, fasta36.3.6d, 36.3.6f, 36.3.8, 36.3.8hfasta
fastpfastpBioinformatics MiscRackham, Bianca, SnowyMITpreprocess0.20.0, 0.23.1fastp
fastq_screenfastq_screenBioinformatics MiscRackham, Bianca, SnowyGPLfastq0.5.1, 0.9.2, 0.11.1fastq_screen
fermifermiBioinformatics AssemblyMilou, Rackham, Biancaassembly1.1-r751-betafermi
fermikitfermikitBioinformatics PipelinesRackham, Bianca, Snowypipeliner178, 0.14-prerelease-96f7820fermikit
flexflexCompilers and build toolsRackham, Bianca, Snowymodified BSDgene, compiler2.6.4flex
fpcFree Pascal CompilerCompilers and build toolsRackham, Bianca, SnowyGPL and LGPLcompiler3.0.4fpc
garliGARLIBioinformatics PhylogenyMilou, Rackham, BiancaGPL v3phylogeny2.1garli
gctaGCTABioinformatics MiscRackham, Bianca, SnowyMITarchitecture, genetic, gene, trait, complex1.26.0, 1.93.2betagcta
geneidGeneIDBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment, gene1.4.4geneid
gffreadgffreadBioinformatics MiscRackham, Bianca, SnowyMITconversion, filter, tract0.12.6gffread
gitGitCompilers and build toolsRackham, Bianca, SnowyLGPL 2.1compiler2.5.0, 2.10.2, 2.16.1, 2.21.0, 2.24.0, 2.28.0git
git-lfsGit Large File SupportCompilers and build toolsRackham, Bianca, SnowyMITcompiler2.9.1git-lfs
gmap-gsnapGMAP-GSNAPBioinformatics AlignmentRackham, Bianca, SnowyCustom as-isalignment2014-01-21, 2015-09-28, 2016-05-01, 2017-09-11, 2018-02-12, 2018-07-04, 2021-03-08gmap-gsnap
gnuparallelGNU parallelParallelRackham, Bianca, SnowyGPLparallel20170122, 20180822gnuparallel
gnuplotGnuplotMisc ApplicationsRackham, Bianca, SnowyCustom open-source "as-is"plot5.0.7, 5.2.7gnuplot
gubbinsGubbinsBioinformatics PhylogenyMilou, Rackham, BiancaGPL v2phylogeny1.4.2, 2.3.1gubbins
guileGuileCompilers and build toolsRackham, Bianca, SnowyLGPL 2.1compiler1.8.8guile
gvcftoolsgvcftoolsBioinformatics MiscMilou, Rackham, BiancaVarious "As Is" open sourcegvcf0.16.2, 0.17.0gvcftools
hdf4HDF4LibrariesRackham, Bianca, SnowyBSD-style open sourcehdf4.2.11_gcc4.9.2, 4.2.14-gcc6.3.0hdf4
hdf5HDF5LibrariesRackham, Bianca, SnowyBSD-style open sourcehdf1.8.18, 1.8.18_gcc6.3.0, 1.10.5, 1.10.5-intel18.3hdf5
hichipperhichipperBioinformatics PipelinesRackham, Bianca, SnowyMITpipeline0.7.3hichipper
hmmerHMMERBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment, hmm2.3.2-gcc, 2.3.2-intel, 3.0, 3.1b1, 3.1b2, 3.1b2-intel (MPI support, compiled with intel/17.2 and openmpi/2.1.0)hmmer
htslibHTSlibBioinformatics MiscRackham, Bianca, SnowyMIT/Expat and modified 3-clause BSDvariant1.3, 1.3.2, 1.4, 1.5, 1.6, 1.8, 1.9, 1.10, 1.12, 1.14htslib
infernalInfernalBioinformatics AlignmentMilou, Rackham, BiancaBSDalignment, rna1.1.1, 1.1.2infernal
iqtreeIQ-TREEBioinformatics PhylogenyRackham, Bianca, SnowyGPL v2phylogeny, tree1.5.3-omp, 1.5.4-omp-mpi, 1.6.5-omp-mpi, 1.6.10-omp-mpi, 1.6.12-omp-mpi, 2.0-rc2-omp-mpiiqtree
javaJava JDKCompilers and build toolsRackham, Bianca, SnowyOthercompilersun_jdk1.6.0_04, sun_jdk1.6.0_18, sun_jdk1.7.0_25, sun_jdk1.8.0_40, sun_jdk1.8.0_92, sun_jdk1.8.0_151java
juliaJuliaCompilers and build toolsRackham, Bianca, SnowyMITjulia, compiler1.1.1, 1.4.2, 1.6.1julia
k8k8Compilers and build toolsRackham, Bianca, Snowyserver-side, compiler, language, gene, shell, server0.2.5k8
kallistokallistoBioinformatics MiscRackham, Bianca, SnowyCustom "as is" open sourcesequencing, transcript, target, gene, reads, abundance0.42.4, 0.43.0, 0.43.1, 0.45.1, 0.46.2kallisto
kouramiKouramiBioinformatics AssemblyRackham, Bianca, SnowyBSD 3assembly0.9.6kourami
lastLASTBioinformatics AlignmentMilou, Rackham, Bianca, SnowyGPL v3alignment847, 963last
lastzLASTZBioinformatics AlignmentMilou, Rackham, Biancaalignment1.03.54, 1.04.00lastz
libBigWiglibBigWigBioinformatics AnnotationRackham, Bianca, SnowyMITannotation0.4.4libBigWig
libgeotifflibgeotiffGeospatialRackham, Bianca, SnowyX-style and custom open-source "as is"geo, spatial1.4.1, 1.4.3libgeotiff
libsodiumSodiumLibrariesRackham, Bianca, SnowyISCtable, library, decryption, hash, encryption1.0.18-stablelibsodium
libwebplibwebpLibrariesRackham, Bianca, SnowyGoogle "as is" open sourcelibrary, decode, images1.2.0libwebp
libzmqZeroMQLibrariesRackham, Bianca, SnowyGPL v3transport, queue, library, kernel, asynchronous, multi, tract, pattern, protocol, filter4.3.4libzmq
lpsolvelp_solveLibrariesRackham, Bianca, SnowyLGPL v2solver5.5.2.9lpsolve
mafToolsmafToolsBioinformatics AlignmentRackham, Bianca, SnowyCustom "As is"alignment20170914-82077acmafTools
makerMAKERBioinformatics AnnotationRackham, Bianca, SnowyGPLannotation2.31.9, 2.31.10, 3.01.1-beta, 3.01.2-beta, 3.01.2-beta-mpi, 3.01.04maker
maqMaqBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment0.7.1maq
medakamedakaBioinformatics MiscRackham, Bianca, SnowyMPL 2.0variant, nanopore, consensus, sequencing0.7.1, 1.4.3medaka
megahitMEGAHITBioinformatics AssemblyMilou, Rackham, BiancaGPL v3assembly1.0.3, 1.1.2, 2.0.4megahit
metaWRAPmetaWRAPMisc ApplicationsRackham, Bianca, SnowyMITgenome, metagenomic, pipeline, genomic1.3.2metaWRAP
metaphlan2MetaPhlAn2Bioinformatics PhylogenyRackham, Bianca, Snowyphylogeny2.0metaphlan2
mincedMinCEDBioinformatics AlignmentMilou, Rackham, BiancaGPL v3alignment0.2.0minced
minimapMinimapBioinformatics AssemblyMilou, Rackham, BiancaMITassembly20160114minimap
minimap2Minimap2Bioinformatics AlignmentRackham, Bianca, SnowyMITalignment2.4, 2.14, 2.16, 2.17-r941, 2.18-r1015minimap2
mipMIPBioinformatics PipelinesRackham, Bianca, SnowyMITpipeline6.0.0mip
monoMonoCompilers and build toolsRackham, Bianca, SnowyCustom open sourcecompiler3.12.0, 5.8.1.0, 5.10.1.27mono
mosaik-alignerMOSAIKBioinformatics AlignmentMilou, Rackham, BiancaGPLalignment, aligner1.0.1388, 1.1.0021, 2.2.3, 2.2.26, 2.2.30mosaik-aligner
mrbayesMrBayesBioinformatics PhylogenyMilou, Rackham, Bianca, SnowyPublic domainphylogeny3.1.2-mpi, 3.2.20 (mpi), 3.2.1 (mpi), 3.2.3a (mpi), 3.2.4 (mpi), 3.2.6 (mpi), 3.2.6 with gcc 7.1.0 (mpi), 3.2.7a with gcc 8.3.0 (mpi, gpu)mrbayes
multizMultizBioinformatics AlignmentMilou, Rackham, Biancaalignment, multi012109multiz
muscleMUSCLEBioinformatics AlignmentMilou, Rackham, BiancaPublic domainalignment3.8.31, 3.8.1551muscle
nasmNASMCompilers and build toolsRackham, Bianca, SnowyBSD 2-clausetable, compiler, gene, architecture2.15.05nasm
nclNEXUS Class LibraryBioinformatics PhylogenyMilou, Rackham, BiancaSimplified BSDphylogeny2.1.21-devncl
ncviewncviewGeospatialRackham, Bianca, SnowyGPL v3browser2.1.7ncview
netcdfNetCDFLibrariesRackham, Bianca, SnowyCustom open source "as is"netcdf4.7.1, 4.7.1-intel18.3netcdf
nf-corenf-coreBioinformatics PipelinesRackham, Bianca, Snowypipeline1.12.1, 1.14, 2.1nf-core
nf-core-pipelinesnf-core-pipelinesBioinformatics PipelinesRackham, Bianca, Snowypipelinelatestnf-core-pipelines
ngsplotngsplotBioinformatics MiscMilou, Rackham, BiancaGPL v2plot2.61, 2.63ngsplot
oncotatorOncotatorBioinformatics AnnotationRackham, Bianca, SnowyBROAD INSTITUTE SOFTWARE LICENSE AGREEMENT FOR ACADEMIC NON-COMMERCIAL RESEARCH PURPOSES ONLYannotation1.9.9.0oncotator
openbabelOpen BabelMisc ApplicationsRackham, Bianca, SnowyGPL v2language, chemical, peak3.1.1-gcc9.3.0openbabel
openblasOpenBLASLibrariesRackham, Bianca, SnowyBSD 3-clauselibrary, openmp0.2.14a, 0.2.19, 0.2.19-singlethread, 0.2.20, 0.2.20-singlethread, 0.2.20-openmp, 0.3.15, 0.3.15-singlethread, 0.3.15-openmpopenblas
optimaOPTIMABioinformatics AlignmentMilou, Rackham, BiancaGPL v2alignmentf-1.3optima
pamlPAMLBioinformatics PhylogenyMilou, Rackham, BiancaOtherphylogeny4.6, 4.9epaml
patchelfpatchelfCompilers and build toolsRackham, Bianca, SnowyGPL v3compiler0.8, 0.10patchelf
pbbampbbamBioinformatics AssemblyRackham, Bianca, SnowyPacBio open source licenselanguage, library, assembly1.6.0pbbam
pblatpblatBioinformatics AlignmentRackham, Bianca, SnowySame as blatalignment2.0, 2.1pblat
pblat-clusterpblat-clusterBioinformatics AlignmentRackham, Bianca, SnowySame as blatalignment, cluster0.1, 0.3, 1.0pblat-cluster
perlPerlCompilers and build toolsRackham, Bianca, SnowyGPL, Perlperl, compiler5.18.2, 5.18.4, 5.22.2, 5.24.0, 5.24.1, 5.26.2 (5.24+ build with 64-bit ints and pointers)perl
perl6Perl 6Compilers and build toolsRackham, Bianca, SnowyArtistic License 2.0perl, compilerrakudo-star-2017.04, rakudo-star-2019.03perl6
perl_modulesPerl Modules/Packages (large collection)Compilers and build toolsRackham, Bianca, SnowyGPL, Perlperl, compiler5.18.4, 5.24.1, 5.26.2perl_modules
pfam_scanpfam_scanBioinformatics AlignmentRackham, Bianca, SnowyGPL v3alignment1.3, 1.5, 1.6pfam_scan
phantompeakqualtoolsphantompeakqualtoolsBioinformatics MiscMilou, Rackham, BiancaMITpeak1.1phantompeakqualtools
phaserphASERBioinformatics MiscSnowy, Rackham, BiancaGNUphasing, phase20210423-5d4926dphaser
phastPhastBioinformatics PhylogenyRackham, Bianca, SnowyBSDphylogeny1.4, 1.5phast
phylobayesPhyloBayesBioinformatics PhylogenyRackham, Bianca, Snowyphylogeny4.1cphylobayes
phylobayesmpiPhyloBayes MPIBioinformatics PhylogenyRackham, Bianca, SnowyGPL v2phylogeny1.8, 1.8bphylobayesmpi
phyutilityPhyutilityBioinformatics PhylogenyRackham, Bianca, Snowyphylogeny2.7.3phyutility
pindelPindelBioinformatics MiscMilouGPL 3indel0.2.4wpindel
pixypixyBioinformatics MiscRackham, Bianca, SnowyMIT Licensenucleotide, diversity, population1.2.5.beta1pixy
pizzlypizzlyBioinformatics MiscRackham, Bianca, SnowyBSD 2-clausefusion0.37.3pizzly
plinkPLINKBioinformatics MiscRackham, Bianca, SnowyGPL v3link1.07, 1.90b3n, 1.90b4.9plink
plink2PLINK2Bioinformatics MiscRackham, Bianca, SnowyGPL v3link2.00-alpha-2-20180704, 2.00-alpha-2-20190429, 2.00-alpha-2.3-20200124plink2
plinkseqPLINK/SEQBioinformatics MiscRackham, Bianca, SnowyOtherlink0.08, 0.10plinkseq
pplacerpplacerBioinformatics AlignmentMilou, Rackham, BiancaGPL v3alignment1.1.alpha19pplacer
prankPRANKBioinformatics AlignmentRackham, Bianca, SnowyGPL v2+alignment150803, 170427prank
protobufProtocol BuffersLibrariesRackham, Bianca, SnowyGoogle custom AS ISlanguage, structure, utr3.11.4, 3.15.5, 3.15.5-gcc9.3.0protobuf
pysamPySamBioinformatics MiscRackham, Bianca, SnowyMITpython0.7.7 (python 2.7.6), 0.7.7-py3 (python 3.3.1), 0.8.3 (python 2.7.6), 0.8.3-py3 (python 3.4.3), 0.9.1.4, 0.10.0, 0.13-python2.7.11, 0.13-python3.6.0, 0.15.3-python3.6.8, 0.15.3-python3.7.2, 0.17.0-python3.9.5pysam
pythonPythonCompilers and build toolsRackham, Bianca, SnowyPSF License Agreementpython, compiler2.7.6, 2.7.9, 2.7.11, 2.7.15, 3.3, 3.3.1, 3.4.3, 3.5.0, 3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5python
python3Python 3 (does not conflict with python module)Compilers and build toolsRackham, Bianca, SnowyPSF License Agreementpython, conflict, compiler3.6.0, 3.6.8, 3.7.2, 3.8.7, 3.9.5python3
qiime2qiime2Bioinformatics PipelinesRackham, Bianca, SnowyBSD 3pipeline2018.11.0qiime2
quickmergequickmergeBioinformatics AssemblyRackham, Bianca, SnowyGPL v3kmer, assembly0.3-9233726quickmerge
raxmlRAxMLBioinformatics PhylogenyRackham, Bianca, SnowyGPL v3phylogeny7.3.0, 8.2.10-gcc, 8.2.10-gcc-mpi, 8.2.12-gcc, 8.2.12-gcc-mpiraxml
rclonercloneMisc ApplicationsRackham, Bianca, SnowyMITstorage, cloud1.56.2rclone
rnammerRNAmmerBioinformatics AlignmentMilou, Rackham, Biancaalignment, rna1.2rnammer
rtgcoreRTG Core non-commercial (aka rtg-core)Bioinformatics MiscMilou, Rackham, BiancaCustom non-commercial use onlycommercial3.8.3rtgcore
rubyRubyCompilers and build toolsRackham, Bianca, SnowyRuby licensecompiler2.4.1, 2.5.0, 2.6.2ruby
rustRustCompilers and build toolsRackham, Bianca, SnowyApache 2.0, MITcompiler1.43.1rust
samtoolsSAMtoolsBioinformatics MiscRackham, Bianca, SnowyMITvariant0.1.19, 1.1, 1.2, 1.3, 1.4, 1.5, 1.5_debug, 1.6, 1.8, 1.9, 1.10, 1.12, 1.14samtools
satsuma2satsuma2Bioinformatics AssemblyMilou, Rackham, BiancaLGPL v3+alignment2016-12-07satsuma2
schmutziSchmutziBioinformatics MiscRackham, Bianca, SnowyGPL v3contamination, ancient20160424, 20200706-597c6bcschmutzi
selscanSelscanBioinformatics MiscMilou, Rackham, BiancaGPL v3selection, plot, haplotype1.1.0, 1.3.0selscan
sissrssissrsBioinformatics AnnotationRackham, Bianca, SnowyOpen sourceannotation1.4sissrs
slurm-drmaaslurm-drmaaLibrariesRackham, Bianca, SnowyGPL v3slurm, drmaa1.1.2-slurm19.05.8slurm-drmaa
snakemakesnakemakeBioinformatics PipelinesRackham, Bianca, Snowypipeline, snakemake3.10.1, 4.5.0, 5.2.3, 5.4.5, 5.10.0, 5.13.0, 5.17.0, 5.26.1, 5.30.1, 6.9.1snakemake
snapsnapBioinformatics MiscMilou, Rackham, BiancaGPLannotation2013-02-16, 2013-11-29snap
snippysnippyBioinformatics MiscSnowy, Rackham, BiancaGPL 2.0genome, haploid, reads, reference4.0, 4.0.5, 4.6.0snippy
snpADsnpADBioinformatics MiscRackham, Bianca, Snowyopen sourcesnp0.3.0, 0.3.4snpAD
snpEffsnpEffBioinformatics AnnotationRackham, Bianca, SnowyLGPLannotation, snp4.1, 4.2, 4.3tsnpEff
soapdenovoSOAPdenovoBioinformatics AssemblyMilou, Rackham, Biancaassembly1.05, 2.04-r240soapdenovo
soapdenovo-transSOAPdenovo-TransBioinformatics AssemblyMilou, Rackham, BiancaGPL v3assembly1.03, 1.04soapdenovo-trans
spadesSPAdesBioinformatics AssemblyRackham, Bianca, SnowyGPL v3,GPLv2(3.14.1)assembly3.6.0, 3.8.0, 3.8.1, 3.9.0, 3.10.0, 3.10.1, 3.11.1, 3.12.0, 3.13.1, 3.14.1, 3.15.3spades
spalnSpalnBioinformatics AlignmentRackham, Bianca, SnowyGPL v2alignment2.1.4, 2.3.1, 2.4.0spaln
sqliteSQLiteMisc ApplicationsRackham, Bianca, SnowyPublic domainlanguage, library, database(system), 3.8.5, 3.11.1, 3.16.2, 3.24.0, 3.34.0sqlite
starSTARBioinformatics AlignmentRackham, Bianca, SnowyMIT, GPLv3 (prior to 2.7.2b)alignment2.3.1o, 2.5.1b, 2.5.2b, 2.5.3a, 2.7.0e, 2.7.0f, 2.7.1a, 2.7.2b, 2.7.9astar
star-fusionSTAR-Fusion together with Genome Resource LibrariesBioinformatics AnnotationRackham, Bianca, SnowyBroad Institute redistributable as-ispipeline, fusion1.0.0, 1.2.0, 1.5.0, 1.7.0star-fusion
subversionSubversionCompilers and build toolsRackham, Bianca, SnowyApache 2.0compiler1.9.3subversion
tRNAscan-SEtRNAscan-SEBioinformatics AlignmentRackham, Bianca, SnowyGPLalignment1.3.1tRNAscan-SE
tigmintTigmintBioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly1.1.2tigmint
tmhmmTMHMMBioinformatics AnnotationRackham, Bianca, SnowyCustom academic as-isannotation, protein, helices, hmm, transmembrane2.0ctmhmm
tophatTophatBioinformatics PipelinesRackham, Bianca, Snowypipeline1.0.14, 1.1.0, 1.1.3, 1.2.0, 1.3.3, 1.4.0, 2.0.0, 2.0.3, 2.0.4, 2.0.8b, 2.0.10, 2.0.11, 2.0.12, 2.1.1tophat
trfTRFBioinformatics AlignmentRackham, Bianca, SnowyGnu Affero GPL v3alignment4.07b, 4.09, 4.09.1trf
trimmomaticTrimmomaticBioinformatics MiscMilou, Rackham, BiancaGPLtrim0.32, 0.36, 0.39trimmomatic
trinityTrinityBioinformatics AssemblyRackham, Bianca, SnowyCustom 'AS IS'transcript, assembly2014-07-17, 2.4.0, 2.9.1, 2.11.0, 2.13.2trinity
trinotateTrinotateBioinformatics AnnotationRackham, Bianca, SnowyCustom open-source 'AS IS'annotation3.0.2, 3.1.1, 3.2.2trinotate
ucsc-utilitiesUCSC Bioinformatics UtilitiesBioinformatics MiscRackham, Bianca, SnowyNon-commercialbioinformaticsv287, v334, v345, v398, v421ucsc-utilities
unimapunimapBioinformatics AlignmentRackham, Bianca, SnowyMITreference, alignment, assembly-to-reference, assembly0.1-r46-dirtyunimap
usearchusearch (32-bit version); see vsearch for similar 64-bit capable toolBioinformatics MiscRackham, Bianca, SnowyOthersearch5.2.236, 5.2.32, 6.1.544, 7.0.1090, 8.1.1861, 9.2.64, 11.0.667usearch
velvetVelvetBioinformatics AssemblyMilou, Rackham, BiancaGPLassembly1.0.03, 1.1.04, 1.1.04_K101, 1.1.07, 1.2.10, 1.2.10_K128velvet
vmtouchvmtouchMisc ApplicationsRackham, Bianca, SnowyBSDlearning, cache1.3.1-20200309-4b70a9fvmtouch
vsearchVsearchBioinformatics AlignmentRackham, Bianca, SnowyGPL v3 or BSD 2-clausealignment, search2.3.2, 2.4.3, 2.5.2, 2.14.1, 2.18.0vsearch
wise2Wise2Bioinformatics AlignmentRackham, Bianca, SnowyOtheralignment2.2.0, 2.4.1wise2
wrf-pythonwrf-pythonGeospatialRackham, SnowyGPL v2python, spatial, interpolation, search1.3.1wrf-python
wtdbg2wtdbg2Bioinformatics AssemblyRackham, Bianca, SnowyGPL v3assembly2.4wtdbg2
yasmYasmCompilers and build toolsRackham, Bianca, Snowy'new' BSDassembler, compiler1.3.0yasm
+ + + + + + + + + + + + \ No newline at end of file diff --git a/software/spack/index.html b/software/spack/index.html new file mode 100644 index 000000000..769d1810a --- /dev/null +++ b/software/spack/index.html @@ -0,0 +1,3453 @@ + + + + + + + + + + + + + + + + + + + + + + + Spack - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Spack on UPPMAX

+

Introduction

+

Spack is a simple package management tool, or installer, that also installs the dependencies of the main software automatically. Installing a new software version does not break existing installations, so many configurations can coexist on the same system.

+

It offers a simple spec syntax so that users can specify versions and configuration options concisely. Spack is also simple for package authors: package files are written in pure Python, and specs allow package authors to maintain a single file for many different builds of the same package.

+

Spack documentation

+

The UPPMAX staff already has other ways to install most software applications. Please use Spack only if other ways to install your tool are not possible or very difficult, e.g. when it requires very many dependencies and is not available through, for example, EasyBuild (which the staff can manage centrally). One of the reasons is that Spack produces very many small files, and having two parallel build systems centrally may make things a little complex.

+

This guide may change with time. Please come back and see updates.

+

This guide assumes that no Spack module is available yet (one may come in the near future): you have your own instance of Spack, but can get a configuration file provided by UPPMAX.

+

First steps: Installing your own instance of Spack

+

You may want to use your project folder if you want your colleagues to be able to run the application. Then change directory to a good place before installing Spack.

+
cd <good place>
+
+

Step 1: clone spack

+
module load git
+git clone -c feature.manyFiles=true https://github.com/spack/spack.git 
+cd spack
+
+

To get version v0.18:

+
git checkout releases/v0.18
+
+

Next, add Spack to your path. Spack has some nice command-line integration tools, so instead of simply appending to your PATH variable, source the Spack setup script.

+
source <root dir of spack>/spack/share/spack/setup-env.sh
+
+

Adding this line to your ~/.bashrc as well will activate the "spack commands" each time you start a new terminal session.

+
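For example, appending a line like the one below to your ~/.bashrc would do it. The path is an assumption here: point it to wherever you cloned Spack.

# hypothetical location; adjust to your own Spack clone
source /proj/myproject/spack/share/spack/setup-env.sh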

Orientation of the Spack files

+

The Spack-related files are stored in two places:

+
  • Spack directory: the cloned git repository, with these directories (important in bold):
      • bin: spack executables
      • etc: configuration files
      • lib: libraries
      • share: documentation, scripts etc...
      • var: other settings
      • opt: produced after the first installation; contains all packages (tools, dependencies and libraries)
          • tools are found in a tree: ...opt/spack/linux-<arch>/<compiler>/tool/
  • .spack: local config and packages files, with these directories (important in bold):
      • bootstrap
      • cache
      • reports
      • linux
          • compilers.yaml
          • packages.yaml

The .yaml files in the .spack/linux directory contain information about which tools you want to include from the UPPMAX system.

+
    +
  • The compilers.yaml file lists the compiler (intel or gcc) modules available to build your software tool.
  • The packages.yaml file lists tools that are already available as modules.
  • +
+

By default, these files are empty, but you can copy working "central" files and extend them for your needs. The content of the files can be larger than the needed packages/compilers, i.e. only the packages/dependencies needed for your installation will be "taken" from these files and the rest will be ignored. Therefore, the UPPMAX staff may update these central files once in a while.

+
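As a rough illustration only (the package, version and module name below are made up for the example; the real central files are more extensive), a packages.yaml entry that maps a Spack package to an existing module can look like this:

# sketch of a packages.yaml entry; names and versions are illustrative
packages:
  zlib:
    externals:
    - spec: zlib@1.2.11
      modules:
      - zlib/1.2.11
    buildable: false

With buildable: false, Spack will always use the listed module instead of building its own copy.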

Get templates

+

Do the following to get these templates (be sure to not overwrite old versions of these .yaml files that you configured yourself and might need).

+
cp /sw/build/spack/0.17.1/src/spack/share/spack/templates/compilers.yaml ~/.spack/linux/
+cp /sw/build/spack/0.17.1/src/spack/share/spack/templates/packages.yaml ~/.spack/linux/
+
+

Install your program

+

Check available software applications via Spack:

+
spack list
+spack list <search string>
+
+

Check already installed software applications with Spack:

+
spack find
+spack find <search string>
+
+

Some installations won't need any compilers or "large dependencies". The installation is straightforward:

+
spack install <tool>
+
+

Example:

+
spack install zlib
+
+

In other cases, for larger application tools that require larger dependencies (which we might already have as modules), check the installation documentation to see what is needed. Is a specific compiler recommended? You can also do a "dry run" before installing, to see what Spack "thinks" it needs to install. Use the spec command:

+
spack spec -I <tool>
+
+
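For example, a dry run of zlib built with a particular compiler (the compiler version is only illustrative) would be:

spack spec -I zlib%gcc@5.3.0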

To check the compilers presently available to Spack, type:

+
spack compilers
+
+

If your desired compiler is not there, you can add it by first loading the module and then integrating it into the compilers.yaml file with a Spack command:

+

Example:

+
module load intel/20.4
+spack compiler add
+
+

You can check if the compiler was added, either in the .spack/linux/compilers.yaml file or directly by:

+
spack compilers
+
+

To install a tool with a certain compiler version, if several compilers have been added to Spack, use "%". For a specific version of the software tool or package, use "@".

+
spack install <tool>%<compiler>@<compiler-version>
+
+

Example:

+
spack install zlib%gcc@5.3.0
+
+

Large application tools may take a couple of hours to build, so it might be good to run the installation in an interactive session (4 cores, -n 4).

+
spack install -j 4 <tool>
+
+
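On UPPMAX such a session can be started with the interactive command before running the installation; the project name below is a placeholder:

interactive -A snic2022-x-xxx -n 4 -t 04:00:00
spack install -j 4 <tool>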

To use dependencies already available from our environment module system ('module load'), check your packages.yaml file:

+
cat .spack/linux/packages.yaml
+
+

Fill it with text defining the Spack name and the corresponding Lmod module names (be careful with the indentation; see the sketch earlier on this page). Then install your tool, as above. To install a specific version of a dependency with Spack, use "^":

+
spack install <tool>%<compiler>@<compiler-version>^<dependency>@<version>
+
+

Here is a summarizing table:

+ + + + + + + + + + + + + + + + + + + + + +
Symbol | Meaning
@ | which version
% | which compiler
^ | which dependency
+
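Putting the three together, a hypothetical install line (package, compiler and versions chosen only for illustration) could look like:

spack install htop@3.0.5 %gcc@5.3.0 ^ncurses@6.2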

Use your tool

+
$ spack load <tool>  
+# module load of the installed dependencies is not needed here, since their paths are integrated in Spack
+$ <tool> [<arguments>]
+
+

Develop

+

More to come... Meanwhile:

+

Developer guide

+

Developer workflows tutorial

+

The builds are by default located here: <spack-root>/opt/spack/linux-centos7-broadwell/<compiler-version>/

+

Packages and environments

+

More to come... Meanwhile:

+

Packaging guide

+

Environments guide

+

Environments tutorial

+

Garbage collection

+

Installing and uninstalling software will in the end use up your disk space, so it is good practice to do some garbage collection:

+
spack gc
+
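If you know which installation you no longer need, you can also remove it explicitly and then collect the garbage; for example:

spack uninstall zlib%gcc@5.3.0
spack gc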
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/squeue/index.html b/software/squeue/index.html new file mode 100644 index 000000000..1c3739983 --- /dev/null +++ b/software/squeue/index.html @@ -0,0 +1,3215 @@ + + + + + + + + + + + + + + + + + + + squeue - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

squeue

+

The job scheduler consists of many programs to manage jobs. squeue is a tool to view information about the job queues.

+

View all jobs

+

View all jobs in the Bianca or Rackham queue

+

View all jobs in the Bianca or Rackham queue:

+
squeue
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham1 ~]$ squeue | head -n 1; squeue | shuf | head
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+          49086999      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49086465      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49085829      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49086067      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49086600      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49087075      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49080199      node /proj/sn torsteng PD       0:00      1 (Priority)
+          49088741      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49086825      core sbatch_l matca755 PD       0:00      1 (Priority)
+          49087385      core sbatch_l matca755 PD       0:00      1 (Priority)
+
+
+

View all jobs in the Snowy queue

+

View all jobs in the Snowy queue:

+
squeue -M snowy
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham1 ~]$ squeue -M snowy
+CLUSTER: snowy
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+           9642748      core blast2un qiuzh610 PD       0:00      1 (Nodes required for job are DOWN, DRAINED or reserved for jobs in higher priority partitions)
+           9642749      core blast2un qiuzh610 PD       0:00      1 (Priority)
+           9642750      core blast2un qiuzh610 PD       0:00      1 (Priority)
+           9642751      core blast2un qiuzh610 PD       0:00      1 (Priority)
+           9640955      core interact    teitu  R 1-00:09:18      1 s201
+           9642778      core snakejob yildirim  R       9:18      1 s25
+           9641765      core Ridge_al yildirim  R   17:28:32      1 s201
+           9642747      core blast2un qiuzh610  R      31:48      1 s33
+           6968659      core  bpe_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)
+           6968658      core  bpe_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)
+           6968656      core word_nmt moamagda RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)
+           6968644      core word_nmt  matsten RD       0:00      1 (Reservation uppmax2022-2-18_4 was deleted)
+           9642777      node P20608_5    teitu PD       0:00      1 (Resources)
+           9642764      node     flye   octpa7  R    8:14:14      1 s9
+           9641505      node Fed_3_10  koussai  R   21:48:40      1 s73
+           9639430      node hmm_alig   ninaza  R 8-16:57:07      1 s149
+           9642775      node rhd0_st3    ariah  R      31:58      8 s[123-124,126-129,131,133]
+           9642763      node rhd1_st3    ariah  R   13:57:58      8 s[121,139,141,143-145,147-148]
+           9639541   veryfat interact  nikolay PD       0:00      1 (ReqNodeNotAvail, UnavailableNodes:s230)
+           9545835   veryfat     BAND    baldo PD       0:00      1 (AssocMaxCpuMinutesPerJobLimit)
+           9639540   veryfat interact  nikolay  R 7-21:34:31      1 s229
+
+
+

View your jobs in the queue

+

View your jobs in the Bianca or Rackham queue

+

View your jobs in the Bianca or Rackham queue:

+
squeue --me
+
+
+What does that look like? +

Your output will be similar to this, when you have no jobs in the queue:

+
[sven@rackham1 ~]$ squeue -u $USER
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+
+
+

Or alternatively:

+
squeue -u $USER
+
+
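If the default columns truncate job names, you can ask squeue for a custom output format; the field widths below are just a suggestion:

squeue --me --format="%.18i %.9P %.30j %.8T %.10M %R"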

View your jobs in the Snowy queue

+

View your jobs in the Snowy queue:

+
squeue -M snowy --me
+
+
+What does that look like? +

Your output will be similar to this, when you have no jobs in the queue:

+
[sven@rackham1 ~]$ squeue -u $USER -M snowy
+CLUSTER: snowy
+             JOBID PARTITION     NAME     USER ST       TIME  NODES NODELIST(REASON)
+
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh/index.html b/software/ssh/index.html new file mode 100644 index 000000000..377cc86f0 --- /dev/null +++ b/software/ssh/index.html @@ -0,0 +1,3313 @@ + + + + + + + + + + + + + + + + + + + + + + + SSH - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

ssh

+

From Wikipedia:

+
+

The Secure Shell Protocol (SSH) is a cryptographic network protocol for operating network services securely over an unsecured network.

+
+

At UPPMAX we allow users to log in via SSH, using the program ssh.

+ +

SSH key management

+

For WSL2 under Windows 10 or Windows 11, here is a neat way to get a persistent key manager in WSL2 (credits: original source).

+
sudo apt-get install keychain
+
+

Replace XXXX with the output of the hostname command on the command line.

+
/usr/bin/keychain -q --nogui $HOME/.ssh/id_ed25519_key
+source $HOME/.keychain/XXXX-sh
+
+

Remove -q to get some information if you want:

+
* keychain 2.8.5 ~ http://www.funtoo.org
+* Found existing ssh-agent: 4487
+* Known ssh key: /home/user/.ssh/id_ed25519_key
+
+

The first time you log in, you will be asked for the password, and the key will be handled by the key manager. Check with:

+
ssh-add -l
+256 SHA256:wLJvQOM....   ....cTTtiU MyNewKey (ED25519)
+
+

MobaXterm

+

In MobaXterm you can use the internal MobAgent and/or Pageant from the PuTTY tools.

+

MobaXterm

+

OPTIONAL: SSH config

+

Here is an example $HOME/.ssh/config file to make your work easier.

+
Host rackham
+User username
+HostName rackham.uppmax.uu.se
+ServerAliveInterval 240
+ServerAliveCountMax 2
+
+# Default settings
+#=======================================
+Host *
+ForwardAgent no
+ForwardX11 yes
+ForwardX11Trusted yes
+ServerAliveInterval 120
+#=======================================
+
+

Now

+
# without config
+ssh -X username@rackham.uppmax.uu.se
+# with config
+ssh rackham
+
+# without config
+scp local_file username@rackham.uppmax.uu.se:remote_folder/
+# with config
+scp local_file rackham:remote_folder/
+
+rsync ...
+sftp ...
+
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_client/index.html b/software/ssh_client/index.html new file mode 100644 index 000000000..e0aecff64 --- /dev/null +++ b/software/ssh_client/index.html @@ -0,0 +1,3341 @@ + + + + + + + + + + + + + + + + + + + SSH client - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + + + + + + + + + + + + + + +

SSH client

+

An SSH client is a program that allows one to use SSH.

+

Overview of SSH clients

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Operating systemSSH ClientRecommended?Allows graphics? [1]Description
LinuxsshYesYesStart from a terminal
MacOSsshYesYes [2]Start from a terminal, needs install for graphics [2]
WindowsMobaXtermYesYesEasiest for Windows users [5]
WindowsPuTTYNeutralYes [3]Needs install for graphics [3]
WindowssshNeutralUnknownStart from CMD, later Windows versions [4]
WindowssshNeutralUnknownStart from PowerShell [4]
+
    +
  • [1] The technical question is 'Allows X forwarding', as this is the way graphical displays are allowed
  • +
  • [2] After installing XQuartz
  • +
  • [3] After installing Xming
  • +
  • [4] Untested
  • +
  • [5] MobaXterm has a built-in X server
  • +
+

Using ssh with different terminals that do not allow for graphics

+
+
+
+
    +
  • Start terminal (e.g. from Launchpad) or iTerm2 to run ssh
  • +
+
ssh [username]@rackham.uppmax.uu.se
+
+
    +
  • where [username] is your UPPMAX username, for example ssh sven@rackham.uppmax.uu.se
  • +
+ +
    +
  • +

    iTerm2 goodies:

    +
      +
    • You can save hosts for later.
    • +
    • Drag and drop scp
    • +
    +
  • +
+
+
+
    +
  • Start a terminal (see below) to run ssh:
  • +
+
$ ssh [username]@rackham.uppmax.uu.se
+
+
    +
  • where [username] is your UPPMAX username, for example ssh sven@rackham.uppmax.uu.se
  • +
+

Terminal

+
    +
  • +

The SSH (secure shell) client PuTTY

    +
      +
    • You can save hosts for later.
    • +
    • No graphics.
    • +
    +
  • +
  • +

The Windows PowerShell terminal can also work

    + +
  • +
  • +

    Windows command prompt can also work

    + +
  • +
  • +

    Git bash

    +
  • +
+
+
+
+

Using ssh with different terminals that allow for graphics

+
+
+
+ +
+How do I know XQuartz has been installed? +

As far as we know, you cannot check this directly: you will have to find out by running an application on Rackham that uses it. See below :-)
+
    +
  • Start terminal (e.g. from Launchpad) or iTerm2 to run ssh:
  • +
+
$ ssh -X [username]@rackham.uppmax.uu.se
+
+

where [username] is your UPPMAX username and -X enables X forwarding. +For example, if your UPPMAX username is sven, this would be +ssh -X sven@rackham.uppmax.uu.se

+
+How do I know XQuartz has been installed? +

See SSH X forwarding.

+

Spoiler: use xeyes

+
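A minimal test, assuming you are already logged in with ssh -X: run xeyes, and a small window with a pair of eyes should appear on your local screen.

xeyes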
+
+
+
    +
  • +

    Download and install ONE of the X-servers below (to enable graphics)

    + +
  • +
  • +

    or...

    +
  • +
  • +

Install an SSH (secure shell) program with built-in X11 and an SFTP file manager

    +
      +
    • MobaXterm
    • +
    • sftp frame makes it easy to move, upload and download files.
    • +
    • ... though downloading from remote host to local is usually easier.
    • +
    • tabs for several sessions
    • +
    +
  • +
+ + +
$ ssh -X [username]@rackham.uppmax.uu.se
+
+

where [username] is your UPPMAX username and -X enables X forwarding. +For example, if your UPPMAX username is sven, this would be +ssh -X sven@rackham.uppmax.uu.se

+

Caption

+
    +
  • Or even better, create and save an SSH session, as shown in the image below.
      +
    • This allows you to use MobaXterm as a file manager and to use the built-in graphical text editor.
    • +
    • You can rename the session in the Bookmark settings tab.
    • +
    +
  • +
+

Caption

+
+
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_key_use/index.html b/software/ssh_key_use/index.html new file mode 100644 index 000000000..ae4d936d8 --- /dev/null +++ b/software/ssh_key_use/index.html @@ -0,0 +1,3110 @@ + + + + + + + + + + + + + + + + + + + Create and use an SSH key pair - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_key_use_bianca/index.html b/software/ssh_key_use_bianca/index.html new file mode 100644 index 000000000..8633bf6ab --- /dev/null +++ b/software/ssh_key_use_bianca/index.html @@ -0,0 +1,3293 @@ + + + + + + + + + + + + + + + + + + + Create an SSH key pair for use with Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create an SSH key pair for use with Bianca

+

This page describes how to create and use an SSH key +for the Bianca cluster.

+

Procedure

+

This procedure will fail if:

+
    +
  • You are outside of the university networks, see how to get inside the university networks. This video shows that it fails when outside of the university networks
  • +
  • You use Ubuntu 24.04 Noble, as demonstrated by this video, + where a password is still requested after doing this procedure + on Rackham
  • +
+

Here is the procedure:

+

1. Create an SSH key pair

+

On your local computer, create an SSH key pair with the following command:

+
+Can I also do this from Rackham? +

Yes.

+

In that case, read 'Rackham' instead of 'local computer'

+
+
ssh-keygen -a 100 -t ed25519 -f ~/.ssh/id_ed25519_uppmax_login -C "My comment"
+
+

Here is a description of the flags:

+
    +
  • -a 100: 100 rounds of key derivations, making your key's password harder to brute-force, as is recommended here
  • +
  • -t ed25519: type of encryption scheme
  • +
  • -f ~/.ssh/id_ed25519_uppmax_login: specify filename, following the naming scheme as suggested here
  • +
  • -C "My comment": a comment that will be stored in the key, so you can find out what it was for
  • +
+

2. Add the content of your public key to Bianca's authorized keys

+

Add the content of the public key id_ed25519_uppmax_login.pub on your local computer to Bianca's $HOME/.ssh/authorized_keys.

+

There are multiple ways to do so.

+
+Can I use ssh-copy-id? +

No.

+

You cannot use ssh-copy-id.

+
+

One way is to, on your local computer, view the content of the file:

+
cat $HOME/.ssh/id_ed25519_uppmax_login.pub
+
+

Then copy that line to your clipboard.

+
+What does that look like? +
$ cat $HOME/.ssh/id_ed25519_uppmax_login.pub
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFGXV8fRK+cazt8qHX+fGS+w6WPOuE82Q19A12345678 Sven's key to UPPMAX
+
+
+

On Bianca, to edit the authorized keys file, do:

+
nano $HOME/.ssh/authorized_keys
+
+

In nano, paste the line from your clipboard. Save the file and close nano.

+
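Alternatively, instead of editing the file in nano, you can append the public key in a single command on Bianca. This is a sketch: replace the quoted text by the actual line from your clipboard:

echo 'ssh-ed25519 AAAA[rest of your public key] My comment' >> $HOME/.ssh/authorized_keys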
+

The public key must be one line

+

The public key you've just copy-pasted must be one line. +It must not be wrapped/split over multiple lines.

+
+
+How can I check? +

On Bianca, do:

+
cat .ssh/authorized_keys 
+
+

You should find your public key there. It looks similar to this:

+
[sven@sens2017625-bianca ~]$ cat .ssh/authorized_keys 
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFGXV8fRK+cazt8qHX+fGS+w6WPOuE82Q19A12345678 Sven's key to UPPMAX
+
+
+

3. Set the right permissions

+

On Bianca, do:

+
chmod 700 .ssh/authorized_keys
+chmod 700 .ssh
+chmod 700 ~
+
+
+How can I check? +

You can check by doing the following and observing similar output:

+
ls -ld .ssh
+
+

Output should be:

+
drwx--S--- 2 sven sven 4096 Jan  8 10:26 .ssh
+
+

Second check:

+
[richel@sens2017625-bianca ~]$ ls -ld .ssh/authorized_keys 
+
+

Output should be similar to:

+
-rwx------ 1 sven sven 104 Jan  8 10:26 .ssh/authorized_keys
+
+

Third check:

+
ls -l .ssh
+
+

Output should be similar to:

+
total 1
+-rw-r----- 1 user user 743 May  7  2019 authorized_keys
+
+

or

+
total 1
+-rwx------ 1 sven sven 104 Jan  8 10:26 authorized_keys
+
+
+

4. Log in to Bianca via the console using an SSH key

+

Log in to Bianca via the console using an SSH key, +using ssh -A:

+
ssh -A [username]-[project]@bianca.uppmax.uu.se
+
+

For example:

+
ssh -A sven-sens12345@bianca.uppmax.uu.se
+
+

You will still get one login prompt, which asks for your UPPMAX password and 2FA.

+

If all worked, you will not need to type the UPPMAX password again.

+

Troubleshooting

+

To debug, run SSH commands with the -vv flag.

+
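For example, when logging in to Bianca as in step 4, using the example username and project from above:

ssh -vv -A sven-sens12345@bianca.uppmax.uu.se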
+What does that look like? +
...
+debug1: Requesting authentication agent forwarding.
+debug2: channel 1: request auth-agent-req@openssh.com confirm 0
+...
+
+debug1: client_input_channel_open: ctype auth-agent@openssh.com rchan 2 win 65536 max 16384
+debug1: client_request_agent: bound agent to hostkey
+debug2: fd 8 setting O_NONBLOCK
+debug1: channel 2: new [authentication agent connection]
+debug1: confirm auth-agent@openssh.com
+Last login: Tue Jul 11 18:44:21 2023 from 172.18.144.254
+ _   _ ____  ____  __  __    _    __  __
+| | | |  _ \|  _ \|  \/  |  / \   \ \/ /   | System:    sens2017625-bianca
+| | | | |_) | |_) | |\/| | / _ \   \  /    | User:      user
+| |_| |  __/|  __/| |  | |/ ___ \  /  \    |
+ \___/|_|   |_|   |_|  |_/_/   \_\/_/\_\   |
+
+  ###############################################################################
+
+
+

On Linux, it still asks for a password

+

From this post +and its answer:

+

On Bianca, do:

+
chmod 700 .ssh/authorized_keys 
+chmod 700 .ssh
+chmod 700 ~
+
+

On your local computer, do:

+
chmod 700 .ssh/authorized_keys 
+chmod 700 .ssh
+chmod 700 ~
+
+ + + + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_key_use_dardel/index.html b/software/ssh_key_use_dardel/index.html new file mode 100644 index 000000000..5591b24b3 --- /dev/null +++ b/software/ssh_key_use_dardel/index.html @@ -0,0 +1,3299 @@ + + + + + + + + + + + + + + + + + + + Create and use an SSH key pair for Dardel - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create and use an SSH key pair for Dardel

+

This page describes how to create and use an SSH key +for the Dardel cluster.

+

This guide will show you:

+ +

This makes it possible for you to log in to Dardel.

+

PDC has a more comprehensive guide on how to do this on various operating systems, if you want more depth.

+
+

Warning

+
    +
  • To be able to transfer from Rackham you have to do the following steps on Rackham.
  • +
  • You can also do these steps on your local computer, to be able to log in directly from your terminal and not via Rackham.
  • +
+
+

1. How to create SSH keys

+

To create an SSH key, one needs to

+
    +
  • start generating the key
  • +
  • specify the filename
  • +
  • specify the password
  • +
+

1.1 Start generating the key

+
    +
  • To create an SSH key, run the following command:
  • +
+
ssh-keygen -t ed25519
+
+

This will start creating an SSH key using the ed25519 algorithm.

+

1.2 Specify where to save the file

+

The program will ask you where to save the file,

+
user@rackham ~ $ ssh-keygen -t ed25519
+Generating public/private ed25519 key pair.
+Enter file in which to save the key (/home/user/.ssh/id_ed25519):
+
+

If you press enter it will save the new key using the suggested name, /home/user/.ssh/id_ed25519

+

If it asks whether you want to overwrite, you probably want to press n, since you already have one created and might want to use that one instead. If you overwrite it, you will lose access wherever the old key file is used, so instead run the ssh-keygen command above again and type in a new name for the file (see the sketch after the prompt below).

+
/home/user/.ssh/id_ed25519 already exists.
+Overwrite (y/n)?
+
+
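For example, to keep the existing key and create a new one under another filename (the filename id_ed25519_pdc is just a suggestion, matching the key name used later on this page):

ssh-keygen -t ed25519 -f ~/.ssh/id_ed25519_pdc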

1.3 Specify the password

+

The next step is to add a password to your key file. This makes sure that even if someone manages to copy your key they will not be able to use it without the password you set here. Type in a password you will remember, press enter, type it in again and press enter.

+
Enter passphrase (empty for no passphrase):
+Enter same passphrase again:
+
+

The key will now be created and you can add it to the PDC Login Portal.

+
+What does this look like? +

This is output similar to what you will see:

+
Your identification has been saved in /home/user/.ssh/id_ed25519
+Your public key has been saved in /home/user/.ssh/id_ed25519.pub
+The key fingerprint is:
+SHA256:g+rvY4HoDNlim+Bj43L3pxr56hrlwC4hzPa/yE/2YqE user@rackham
+The key's randomart image is:
++--[ED25519 256]--+
+|.o               |
+|o   .            |
+| . = .           |
+|    B ..         |
+| + * B..S        |
+|= + o =          |
+|*+.oo=..         |
+|+=oE+ B          |
+| o +*X o         |
++----[SHA256]-----+
+
+
+

2. How to add an SSH key to the PDC Login Portal

+

To add an SSH key to the PDC login portal, one needs to:

+
    +
  • Open the PDC login portal
  • +
  • Start adding a new key
  • +
  • Actually add that public key
  • +
  • Allow the key to be used from UPPMAX
  • +
+

2.1. Open the PDC login portal

+

Go to the PDC Login Portal

+
+What does that look like? +

That will look like this:

+

Example PDC login portal without any SSH keys yet

+
+

Example PDC login portal without any SSH keys yet. +We will need to add an SSH key that allows +access from UPPMAX to PDC

+
+
+

2.2. Start adding a new key

+

Click the Add new key link:

+
+What does adding an SSH key pair look like? +

That will look like this:

+

Example of the first step of adding an SSH key pair to the PDC portal

+
+

Example of the first step of adding an SSH key pair to the PDC portal. +The 'SSH public key' is copy-pasted +from cat ~/id_ed25519_pdc.pub on Rackham. +The 'Key name' can be chosen freely. +Note that this SSH key cannot be used yet for UPPMAX, +as it only allows one IP address.

+
+
+
+What does it look like when the key is added? +

That will look like this:

+

Example PDC login portal with one key

+
+

Example PDC login portal with one key. +Note that the second column only has one IP address +and is still missing *.uppmax.uu.se.

+
+
+

2.3. Actually adding the public key

+

Here you can either upload the public part of the key file +you created before, +or you can enter the information manually.

+
+Forgot where the key was? +

Here is how to display the SSH public key content at the default location:

+
cat ~/.ssh/id_ed25519.pub
+
+

Otherwise, the SSH keys are where you created them in step 1.2 :-)

+
+
+What does the content of a public SSH key look like? +

When displaying the content of a public SSH key, +it will show text like this:

+
ssh-ed25519 AAAA69Nz1C1lZkI1NdE5ABAAIA7RHe4jVBRTEvHVbEYxV8lnOQl22N+4QcUK+rDv1gPS user@rackham2.uppmax.uu.se
+
+
+

Copy the content of the SSH public key. +Paste it into the field SSH public key, +make up a name for the key so you know which computer it is on +and fill it into the field Key name.

+

2.4. Allow the key to be used from UPPMAX

+

Once you have added your key, you have to allow UPPMAX to use it. Click on Add address for it and add *.uppmax.uu.se.

+

Address specifies which IP address(es) +are allowed to use this key +and the field is prefilled with the IP of the computer you are on at the moment.

+
+What does it look like to edit an SSH key so that it can be used for UPPMAX? +

That will look like this:

+

Example of the second step of adding an SSH key pair to the PDC portal

+
+

Example of the second step of adding an SSH key pair to the PDC portal. +Here the custom address *.uppmax.uu.se is added, +so that this SSH key can be used for UPPMAX.

+
+
+
+What does it look like to have a key that can be used for UPPMAX? +

That will look like this:

+

Example PDC login portal with one key

+
+

Example PDC login portal with one key. Note the *.uppmax.uu.se +at the bottom of the second column.

+
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_key_use_rackham/index.html b/software/ssh_key_use_rackham/index.html new file mode 100644 index 000000000..87fef37ce --- /dev/null +++ b/software/ssh_key_use_rackham/index.html @@ -0,0 +1,3195 @@ + + + + + + + + + + + + + + + + + + + Create and use an SSH key pair for Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Create and use an SSH key pair for Rackham

+

This page describes how to create and use an SSH key +for the Rackham cluster.

+

Procedure

+
+Prefer a video? + +
+

This figure shows the procedure:

+
flowchart TD
+  subgraph ip_inside_sunet[IP inside SUNET]
+    create[1.Create an SSH key pair]
+    add[2.Add your keys to an SSH agent]
+    copy[3.Copy the public key to Rackham]
+  end
+  create --> add
+  add --> copy
+

This procedure will fail if:

+ +

1. Create an SSH key pair

+

Create an SSH key pair with the following command:

+
ssh-keygen -a 100 -t ed25519 -f ~/.ssh/id_ed25519_uppmax_login -C "My comment"
+
+
    +
  • -a 100: 100 rounds of key derivations, making your key's password harder to brute-force, as is recommended here
  • +
  • -t ed25519: type of encryption scheme
  • +
  • -f ~/.ssh/id_ed25519_uppmax_login: specify filename, following the naming scheme as suggested here
  • +
  • -C "My comment": a comment that will be stored in the key, so you can find out what it was for
  • +
+

2. Add your keys to an SSH agent

+

Add your newly generated ed25519 key to an SSH agent:

+
ssh-add ~/.ssh/id_ed25519_uppmax_login
+
+
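If ssh-add complains that it 'Could not open a connection to your authentication agent', no agent is running in your shell yet. As a sketch, start one first and then add the key:

eval $(ssh-agent)
ssh-add ~/.ssh/id_ed25519_uppmax_login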

3. Copy the public key to Rackham

+

Copy the public key to Rackham or another server.

+
ssh-copy-id -i .ssh/id_ed25519_uppmax_login.pub [username]@rackham.uppmax.uu.se
+
+
    +
  • -i .ssh/id_ed25519_uppmax_login.pub: the identity file, the public key's filename
  • +
  • [username]@rackham.uppmax.uu.se: your UPPMAX username, for example sven@rackham.uppmax.uu.se
  • +
+

After this, you can log in to Rackham without specifying a password.

+
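Optionally, you can add an entry to ~/.ssh/config on your local computer so that the right key is always used. A minimal sketch, where sven is an example username:

Host rackham
    HostName rackham.uppmax.uu.se
    User sven
    IdentityFile ~/.ssh/id_ed25519_uppmax_login

After this, ssh rackham suffices to log in.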

Troubleshooting

+

On Linux, it still asks for a password

+

From this post +and its answer:

+

On Rackham, do:

+
chmod 700 .ssh/authorized_keys 
+chmod 700 .ssh
+chmod 700 ~
+
+

On your local computer, do:

+
chmod 700 .ssh/authorized_keys 
+chmod 700 .ssh
+chmod 700 ~
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/ssh_x_forwarding/index.html b/software/ssh_x_forwarding/index.html new file mode 100644 index 000000000..415f0c9f3 --- /dev/null +++ b/software/ssh_x_forwarding/index.html @@ -0,0 +1,3205 @@ + + + + + + + + + + + + + + + + + + + SSH X forwarding - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

SSH X forwarding

+

SSH X forwarding (or simply 'X forwarding') +allows one to use graphics when using an SSH client.

+

For example, this is how UPPMAX user sven would log in to Rackham using ssh with X forwarding enabled:

+
ssh -X sven@rackham.uppmax.uu.se
+
+

It is the -X that allows ssh to show graphics.

+
+What is X? +

In this context, the X window system.

+
+
+How can I verify I allow X forwarding? +

Using xeyes.

+
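For example, after logging in with ssh -X, run:

xeyes

If a pair of eyes appears on your local screen, X forwarding works.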
+

UPPMAX clusters that allow SSH with X forwarding

+ + + + + + + + + + + + + + + + + + + + + +
Cluster | Allows SSH with X forwarding
Bianca | No
Rackham | Yes
Snowy | Yes
+

SSH clients

+

See SSH clients.

+

Difference between ssh -X and ssh -Y

+

Adapted from this AskUbuntu answer:

+

If you need graphics, ssh -X is more secure. +However, it may be too secure for your software to run. +In that case, run ssh -Y.

+
flowchart TD
+  need_graphics[Need graphics?]
+  ssh[Using 'ssh' works]
+  try_ssh_x[Try to use 'ssh -X'. Does it work?]
+  ssh_x[Use 'ssh -X']
+  ssh_y[Use 'ssh -Y']
+
+  need_graphics --> |no| ssh
+  need_graphics --> |yes| try_ssh_x
+  try_ssh_x --> |yes| ssh_x
+  try_ssh_x --> |no| ssh_y
+
+

Flowchart to determine to use ssh or ssh -X or ssh -Y.

+
+
+Using ssh -Y? Let us know! +

If you, as a user, use ssh -Y when ssh -X does not work, let us know (see the UPPMAX support page here). It helps us choose which option to show on these documentation websites.

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/tabix/index.html b/software/tabix/index.html new file mode 100644 index 000000000..5acf671bb --- /dev/null +++ b/software/tabix/index.html @@ -0,0 +1,3152 @@ + + + + + + + + + + + + + + + + + + + tabix - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

tabix

+

tabix is a tool that indexes position-sorted, TAB-delimited genome files (such as VCF, GFF or BED, compressed with bgzip) and allows fast retrieval of the lines overlapping a region of interest.

+

Finding tabix

+

To find the versions of tabix installed, use:

+
module spider tabix
+
+
+What does that look like? +

The output may look like this:

+
[sven@rackham1 sven]$ module spider tabix
+
+----------------------------------------------------------------------------
+  tabix: tabix/0.2.6
+----------------------------------------------------------------------------
+
+     Other possible modules matches:
+        tabixpp
+
+    You will need to load all module(s) on any one of the lines below before the
+ "tabix/0.2.6" module is available to load.
+
+      bioinfo-tools
+
+    Help:
+       tabix - use tabix 0.2.6
+
+       Version 0.2.6
+
+
+
+
+----------------------------------------------------------------------------
+  To find other possible module matches execute:
+
+
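To then load and use tabix, here is a minimal sketch; the filename my_variants.vcf.gz is hypothetical, and tabix expects a position-sorted, bgzip-compressed file:

module load bioinfo-tools tabix/0.2.6
tabix -p vcf my_variants.vcf.gz
tabix my_variants.vcf.gz chr1:10000-20000

The first tabix call builds the .tbi index; the second retrieves the lines overlapping the given region.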
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/tensorflow/index.html b/software/tensorflow/index.html new file mode 100644 index 000000000..5ff66af1d --- /dev/null +++ b/software/tensorflow/index.html @@ -0,0 +1,3268 @@ + + + + + + + + + + + + + + + + + + + + + + + TensorFlow - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

TensorFlow

+

TensorFlow is a library for machine learning and artificial intelligence.

+

TensorFlow is available in multiple variants:

+ +

TensorFlow as a Python package for CPU

+

TensorFlow is available as a Python package for CPU that works on Rackham.

+

It is part of the python_ML_packages/[version]-cpu +modules, where [version] is a version, +for example, python_ML_packages/3.11.8-cpu.

+
+How to test TensorFlow as a Python package for CPU? +

On Rackham, load the module to get access to the library:

+
module load python_ML_packages/3.11.8-cpu
+
+

Start Python:

+
python
+
+

In Python, type:

+
import tensorflow as tf
+print(tf.test.is_gpu_available())
+
+

This should print:

+
False
+
+

The output is correct: this is the CPU version.

+
+

TensorFlow as a Python package for GPU

+

TensorFlow is also available as a Python package for GPU that works on Bianca and Snowy.

+

It is part of the python_ML_packages/[version]-gpu +modules, where [version] is a version, +for example, python_ML_packages/3.9.5-gpu

+

⚠ You can load this package on nodes without a GPU, but Python will not find TensorFlow!

+

If you want to work interactively and test things, first allocate resources as seen below:

+

On Snowy

+
interactive -A <proj> -n 2 -M snowy --gres=gpu:1  -t 1:00:01
+
+

On Bianca

+
interactive -A <proj> -n 1 -C gpu --gres=gpu:1 -t 01:10:00
+
+
+How to test TensorFlow as a Python package for GPU? +

Load the module to get access to the library:

+
module load python_ML_packages/3.9.5-gpu
+
+

Start Python:

+
python
+
+

In Python, type:

+
import tensorflow as tf
+print(tf.test.is_gpu_available())
+
+

This should print something like:

+
2024-03-15 14:13:02.038401: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /device:GPU:0 with 13614 MB memory:  -> device: 0, name: Tesla T4, pci bus id: 0000:08:00.0, compute capability: 7.5
+True
+
+

The output is correct: this is the GPU version.

+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/terminal/index.html b/software/terminal/index.html new file mode 100644 index 000000000..34349af6a --- /dev/null +++ b/software/terminal/index.html @@ -0,0 +1,3166 @@ + + + + + + + + + + + + + + + + + + + Terminal - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Terminal

+

A terminal

+
+

A terminal.

+
+

A terminal is a program that allows you to run commands.

+
+How to copy-paste to/from a terminal? +

This depends on the terminal you use; however, these are the most common options:

+

Press CTRL + SHIFT + C for copying, CTRL + SHIFT + V for pasting.

+
+
+What does all the stuff on the line I can type on mean? +

The text at the start of the line you can type on, +is called the command prompt.

+
+
+What is the command prompt? +

The command prompt indicates +that the terminal is waiting for user input.

+

Here is an example prompt:

+
[sven@rackham2 my_folder]$ 
+
+
    +
  • [ and ]: indicates the beginning and end of information
  • +
  • sven: the username
  • +
  • @: at which cluster
  • +
  • rackham2: the remote node's name, + in this case Rackham's second login node
  • +
  • my_folder: (part of) the path of the user, in this case, a folder called my_folder. The indication ~ means that the user is in the home folder
  • +
  • $: indicates that the terminal is ready for user input
  • +
+

The node's name is useful to find out where you are:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Location
rackham1 to rackham4 | A Rackham login node
r1 and higher | A Rackham compute node
bianca | A Bianca login node
b1 and higher | A Bianca compute node
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/test/index.html b/software/test/index.html new file mode 100644 index 000000000..3612a9bf4 --- /dev/null +++ b/software/test/index.html @@ -0,0 +1,3120 @@ + + + + + + + + + + + + + + + + + + + Test - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/text_editors/index.html b/software/text_editors/index.html new file mode 100644 index 000000000..f75d4232e --- /dev/null +++ b/software/text_editors/index.html @@ -0,0 +1,3314 @@ + + + + + + + + + + + + + + + + + + + + + + + Text editors - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Text editors

+

There are many editors that can be used on the UPPMAX clusters:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Editor type | Features
Simple terminal editors | Used in terminal, easy to learn, limited features
Advanced terminal editors | Used in terminal, harder to learn, powerful features
Simple graphical editors | Graphical, needs X forwarding, easy to learn, limited features
Advanced graphical editors | Graphical, needs X forwarding, harder to learn, powerful features
+

Try them out and pick one favorite editor!

+
+

Tip

+

These commands are useful in the command line when something is stuck or a program is preventing you from doing further work.

+
    +
  • ctrl-C interrupts a program or a command that is "stuck"
  • +
  • ctrl-D quits some programs from the program environment in the terminal
  • +
  • ctrl-Z pauses a program, can be continued in background (bg) or foreground (fg)
  • +
+
+

Simple terminal editors

+
    +
  • nano: used in terminal, easy to learn, limited features
  • +
+

Advanced terminal editors

+
+

Warning

+
    +
  • We suggest that you learn these tools before trying to work with them on UPPMAX
  • +
  • If you start one of these editors, you may have difficulties exiting!
  • +
+
+ +

Simple graphical editors

+

To use a graphical editor you will need to:

+ +

See the SSH X forwarding page for how to do so.

+
+And what about Bianca? +

Bianca is an UPPMAX cluster that does not allow X forwarding.

+

See the 'How to login to Bianca' page here +for more details.

+
+

gedit

+

See gedit

+

Advanced graphical editors

+

gvim

+
    +
  • vim with a GUI, lots of features, very fast
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/thinlinc/index.html b/software/thinlinc/index.html new file mode 100644 index 000000000..11739de4e --- /dev/null +++ b/software/thinlinc/index.html @@ -0,0 +1,3313 @@ + + + + + + + + + + + + + + + + + + + + + + + ThinLinc - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

ThinLinc

+

Rackham remote desktop

+
+

Remote desktop environment for Rackham, using the web browser login.

+
+

ThinLinc provides a remote desktop environment for the UPPMAX clusters.

+

There are two ways of connecting to the clusters using ThinLinc: using a local ThinLinc client, or logging in via a web browser. Here are the differences:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Parameter | Local ThinLinc client | Web browser login
Bianca use | Impossible | Possible
Rackham use | Recommended | Possible
Install | ThinLinc client | Nothing [1]
Simplicity | Easy | Trivial
Performance | Higher | Lower
Recommended for | Most use cases | Small tasks, or when the other approach fails
+
    +
  • [1] You already have a web browser installed :-)
  • +
+

The first is to use the web client and connect from the browser. This can be useful for smaller tasks or if you are unable to install software on the computer you are currently using. Please see below for more information.

+

The second option is to download the ThinLinc client, +which offers higher performance and is recommended for most users. +The client can be downloaded from the official download page.

+ +

ThinLinc usage

+

How do I copy/paste within a ThinLinc session?

+
    +
  • Windows/Mac: right-click and choose the copy or paste option, or
  • +
  • Windows:
      +
    • paste: shift+insert
    • +
    • copy: ctrl+insert
    • +
    +
  • +
+

How do I copy/paste between ThinLinc and locally?

+

The ThinLinc clipboard

+

ThinLinc has a clipboard where one can shuttle text via copy-pasting +inside/outside the ThinLinc remote desktop environment.

+
    +
  • +

    Copy in ThinLinc with the ThinLinc command (see above) and the text ends up in the ThinLinc clipboard

    +
      +
    • Mark and copy with Windows/Mac command
    • +
    • Paste locally with Windows/Mac command
    • +
    +
  • +
  • +

    Copy from your local computer

    +
      +
    • paste in the ThinLinc clipboard with Windows/Mac command
    • +
    • paste at the destination in ThinLinc with the ThinLinc command (see above)
    • +
    +
  • +
+

Settings

+

Under the "Screen" tab, you can set the starting size of the session +and choose to enable/disable Full screen mode. +Typically, users prefer to turn off full screen mode.

+

Normally you don't have to change anything else here, and we have also disabled all "local devices" (USB sticks, sound and printers) on the server side, so there is no point in fiddling with these specific options.

+

ThinLinc error: no agent server available

+

ThinLinc error: no agent server available

+
+

ThinLinc error: no agent server available

+
+

Try again :-)

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/thinlinc_on_bianca/index.html b/software/thinlinc_on_bianca/index.html new file mode 100644 index 000000000..4370ed3cc --- /dev/null +++ b/software/thinlinc_on_bianca/index.html @@ -0,0 +1,3110 @@ + + + + + + + + + + + + + + + + + + + ThinLinc on Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/thinlinc_on_rackham/index.html b/software/thinlinc_on_rackham/index.html new file mode 100644 index 000000000..d65cf37bd --- /dev/null +++ b/software/thinlinc_on_rackham/index.html @@ -0,0 +1,3149 @@ + + + + + + + + + + + + + + + + + + + ThinLinc on Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

ThinLinc on Rackham

+

Rackham's remote desktop environment via a web browser

+
+

Rackham's remote desktop environment accessed via a web browser

+
+

ThinLinc provides a remote desktop environment for the UPPMAX clusters. This page describes how to use ThinLinc on Rackham.

+

There are two ways of connecting to the clusters using ThinLinc: using a local ThinLinc client, or logging in via a web browser. See ThinLinc for a comparison.

+

Local ThinLinc client

+

Rackham's remote desktop environment via a local ThinLinc client

+
+

Rackham's remote desktop environment accessed via a local ThinLinc client

+
+

See the UPPMAX page 'Login to the Rackham remote desktop environment using a local ThinLinc client'.

+

Web browser login

+

Rackham's remote desktop environment via a web browser

+
+

Rackham's remote desktop environment accessed via a web browser

+
+

See the UPPMAX page 'Login to the Rackham remote desktop environment website'.

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/tkinter/index.html b/software/tkinter/index.html new file mode 100644 index 000000000..b1683118b --- /dev/null +++ b/software/tkinter/index.html @@ -0,0 +1,3174 @@ + + + + + + + + + + + + + + + + + + + Tkinter - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Tkinter

+

Tkinter is a package that comes built in with (every!) Python executable.

+

Use Tkinter

+

Load a Python module:

+
module load python/3.12.1
+
+

Start Python:

+
python
+
+

Import tkinter in Python:

+
import tkinter
+
+

History

+

In January 2024, there was a Tkinter UPPMAX ticket. +and documentation how to load tkinter.

+

At that time, doing:

+
module load python/3.11.4
+
+

and then in Python:

+
import turtle
+
+

results in:

+
Traceback (most recent call last):
+  File "<string>", line 1, in <module>
+  File "/sw/comp/python3/3.11.4/rackham/lib/python3.11/turtle.py", line 107, in <module>
+    import tkinter as TK
+  File "/sw/comp/python3/3.11.4/rackham/lib/python3.11/tkinter/__init__.py", line 38, in <module>
+    import _tkinter # If this fails your Python may not be configured for Tk
+    ^^^^^^^^^^^^^^^
+ModuleNotFoundError: No module named '_tkinter'
+
+

With the application experts, we found out that Python version 3.11.4 did not have tkinter built in. That Python version was rebuilt. Now all that is needed is to load a Python version and do a regular pip install. That is, this solution should work (see the sketch below):
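A minimal sketch to verify this, using one of the Python modules shown above:

module load python/3.12.1
python -c "import tkinter; print(tkinter.TkVersion)"

This should print the Tk version, for example 8.6.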

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/tracer/index.html b/software/tracer/index.html new file mode 100644 index 000000000..9c1635fff --- /dev/null +++ b/software/tracer/index.html @@ -0,0 +1,3298 @@ + + + + + + + + + + + + + + + + + + + + + + + Tracer - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Tracer

+

Tracer is a tool to analyse the results of a +BEAST or BEAST2 run.

+

Tracer is not an UPPMAX module.

+

Instead, it needs to be downloaded and run:

+

1. Download

+

Pick a Tracer release, +such as Tracer v1.7.2 +and download the Linux/UNIX version.

+
+What does that look like? +

Here is how the release page of Tracer v1.7.2 looks:

+

Tracer

+
+

Tracer

+
+

Download the file Tracer_v1.7.2.tgz.

+
+
+How to download from the command-line? +

Use wget on the URL to download from, for example:

+
wget https://github.com/beast-dev/tracer/releases/download/v1.7.2/Tracer_v1.7.2.tgz
+
+
+

2. Extract

+

Extract the downloaded file.

+
+How to do so, using the remote desktop environment? +

Right-click the file and click 'Extract here'.

+

Click 'Extract here' in this pop-up menu

+
+
+How to do so, using the console environment? +

Use tar on the file to extract:

+
tar zxvf Tracer_v1.7.2.tgz
+
+
+

3. Run

+

Use java to run the Tracer jar file:

+
java -jar lib/tracer.jar
+
+
+What does that look like? +

Here is how Tracer looks in a console environment:

+

Tracer in a console environment

+
+

Tracer in a console environment

+
+

For this to work, one needs to log in using SSH with X forwarding enabled.

+

Spoiler: use ssh -X

+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/transit_file_transfer_using_filezilla/index.html b/software/transit_file_transfer_using_filezilla/index.html new file mode 100644 index 000000000..11da9ddef --- /dev/null +++ b/software/transit_file_transfer_using_filezilla/index.html @@ -0,0 +1,3238 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Transit using FileZilla - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Transit using FileZilla

+

There are multiple ways to transfer files to/from Transit using a graphical tool.

+

Here it is shown how to transfer files using a graphical tool called FileZilla.

+
+What is Transit? +

See the page about the UPPMAX Transit server.

+
+

Procedure

+

FileZilla connected to Transit

+
+

FileZilla connected to Transit

+
+
+Would you like a video? +

If you would like to see how to do file transfer to/from Transit using FileZilla, watch the video here

+
+

FileZilla is a secure file transfer tool that works under Linux, Mac and Windows.

+

To transfer files to/from Transit using FileZilla, do:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start FileZilla

+

3. From the menu, select 'File | Site manager'

+
+Where is that? +

It is here:

+

From the menu, select 'File | Site manager'

+
+

The FileZilla 'File' menu contains the item 'Site manager'

+
+
+

4. Click 'New site'

+
+Where is that? +

It is here:

+

Click 'New site'

+
+

5. Create a name for the site

+

Create a name for the site, e.g. Transit.

+

6. Setup the site

+

For that site, use all standards, except:

+
    +
  • Set protocol to 'SFTP - SSH File Transfer Protocol'
  • +
  • Set host to transit.uppmax.uu.se
  • +
  • Set user to [username], e.g. sven
  • +
+
+What does that look like? +

It looks similar to this:

+

Setup the site done

+
+

7. Click 'Connect'

+

8. You will be asked for your password

+

You will be asked for your password; type [your password], e.g. VerySecret. You can save the password.

+
+What does that look like? +

It looks similar to this:

+

Asked for your password

+
+

9. Transfer files between local and Transit

+

Now you can transfer files between your local computer and Transit.

+
+What does that look like? +

It looks like this:

+

Transfer files between local and Transit

+
+

Where do my files end up?

+

They seem to end up in your Transit home folder.

+

Its location is at /home/[user_name], +for example, at /home/sven.

+

However, this is not the case: +upon closing FileZilla, +the files you've uploaded are gone.

+

You do need to transfer these files to other HPC clusters +before closing FileZilla. +For detailed instructions, see the guides at the respective cluster, among others:

+ +

Extra material

+

WinSCP

+

WinSCP is a secure file transfer tool that works under Windows.

+

To transfer files to/from Transit using WinSCP, do:

+
    +
  • Start WinSCP
  • +
  • Create a new site
  • +
  • For that site, use all standards, except:
      +
    • Set file protocol to 'SFTP'
    • +
    • Set host name to transit.uppmax.uu.se
    • +
    • Set user name to [username], e.g. sven
    • +
    +
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/transit_file_transfer_using_scp/index.html b/software/transit_file_transfer_using_scp/index.html new file mode 100644 index 000000000..d548f954a --- /dev/null +++ b/software/transit_file_transfer_using_scp/index.html @@ -0,0 +1,3172 @@ + + + + + + + + + + + + + + + + + + + Data transfer to/from Transit using SCP - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Data transfer to/from Transit using SCP

+

Data transfer to/from Transit using SCP is one of the ways to transfer files to/from Transit.

+
+What is Transit? +

Transit is an UPPMAX service to send files around. +It is not a file server.

+

See the page about Transit for more detailed information.

+
+
+What are the other ways to transfer files from/to Transit? +

Other ways to transfer data to/from Transit are described here

+
+

One can transfer files to/from Transit using SCP. SCP is an abbreviation of 'Secure copy protocol'; however, it is not considered 'secure' anymore: instead, it is considered an outdated protocol. The program scp allows you to transfer files to/from Transit using SCP, by copying them between your local computer and Transit.

+

How to transfer files between a local computer and Transit

+

The process is:

+

1. Get inside SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start a terminal on your local computer

+

Start a terminal on your local computer

+

3a. Using scp to download from Transit

+

In the terminal, copy files using scp to download files from Transit:

+
scp [username]@transit.uppmax.uu.se:/home/[username]/[remote_filename] [local_folder]
+
+

where [remote_filename] is the path to a remote filename, +[username] is your UPPMAX username, +and [local_folder] is your local folder, for example:

+
scp sven@transit.uppmax.uu.se:/home/sven/my_remote_file.txt /home/sven
+
+
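To download a whole folder instead of a single file, add the -r flag; my_folder is a hypothetical folder name:

scp -r sven@transit.uppmax.uu.se:/home/sven/my_folder /home/sven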

If asked, give your UPPMAX password.

+

You can get rid of this prompt if you have set up SSH keys

+

⛔ 3b. Using scp to upload to Transit

+

This is how you would copy a file from your local computer to Transit:

+
scp [local_filename] [username]@transit.uppmax.uu.se:/home/[username]
+
+

where [local_filename] is the path to a local filename, +and [username] is your UPPMAX username, for example:

+
scp my_file.txt sven@transit.uppmax.uu.se:/home/sven
+
+

However, Transit is not a file server. +The scp command will complete successfully, +yet the file will not be found on Transit.

+

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/transit_file_transfer_using_sftp/index.html b/software/transit_file_transfer_using_sftp/index.html new file mode 100644 index 000000000..e76fa4c5f --- /dev/null +++ b/software/transit_file_transfer_using_sftp/index.html @@ -0,0 +1,3232 @@ + + + + + + + + + + + + + + + + + + + Data transfer to/from Transit using SFTP - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Data transfer to/from Transit using SFTP

+

Data transfer to/from Transit using SFTP is one of the ways to transfer files to/from Transit.

+
+What is Transit? +

See the page about the UPPMAX Transit server.

+
+
+What are the other ways? +

Other ways to transfer data to/from Transit are described here

+
+

One can transfer files to/from Transit using SFTP. SFTP is an abbreviation of 'SSH File Transfer Protocol', where 'SSH' is an abbreviation of 'Secure Shell protocol'. The program sftp allows you to transfer files to/from Transit using SFTP.

+

Using SFTP

+

The procedure is described in the following steps.

+

1. Get inside SUNET

+

Get inside SUNET.

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start a terminal on your local computer

+

Start a terminal on your local computer.

+

3. Connect sftp to Transit

+

In the terminal, connect sftp to Transit by doing:

+
sftp [username]@transit.uppmax.uu.se
+
+

where [username] is your UPPMAX username, for example:

+
sftp sven@transit.uppmax.uu.se
+
+

If asked, give your UPPMAX password. You can get rid of this prompt if you have set up SSH keys.

+

4. In sftp, upload/download files to/from Transit

+

In sftp, upload/download files to/from Transit.

+

For example, to upload a file to Transit:

+
put my_file.txt
+
+
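Some more sftp commands, as a sketch of a typical session (the filenames are hypothetical):

ls
lcd Downloads
get my_remote_file.txt
put my_local_file.txt
exit

Here, ls lists the files on Transit, lcd changes the folder on your local computer, get downloads a file, put uploads a file and exit closes the connection.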

Basic sftp commands can be found here.

+

Where do my files end up?

+

They seem to end up in your Transit home folder.

+

Its location is at /home/[user_name], +for example, at /home/sven.

+

However, this is not the case: +upon closing sftp, +the files you've uploaded are gone.

+

You do need to transfer these files to other HPC clusters +before closing sftp. +For detailed instructions, see the guides at the respective cluster, among others:

+ +

Overview

+
flowchart TD
+
+    %% Give a white background to all nodes, instead of a transparent one
+    classDef node fill:#fff,color:#000,stroke:#000
+
+    %% Graph nodes for files and calculations
+    classDef file_node fill:#fcf,color:#000,stroke:#f0f
+    classDef calculation_node fill:#ccf,color:#000,stroke:#00f
+
+    user(User)
+      user_local_files(Files on user computer):::file_node
+
+    subgraph sub_inside[SUNET]
+      subgraph sub_transit_shared_env[Transit]
+          login_node(login/calculation/interactive node):::calculation_node
+          files_in_transit_home(Files in Transit home folder):::file_node
+      end
+    end
+
+    %% Shared subgraph color scheme
+    %% style sub_outside fill:#ccc,color:#000,stroke:#ccc
+    style sub_inside fill:#fcc,color:#000,stroke:#fcc
+    style sub_transit_shared_env fill:#ffc,color:#000,stroke:#ffc
+
+    user --> |logs in |login_node
+    user --> |uses| user_local_files
+
+    login_node --> |can use|files_in_transit_home
+    %% user_local_files <--> |graphical tool|files_in_transit_home
+    %% user_local_files <--> |SCP|files_in_transit_home
+    user_local_files <==> |SFTP|files_in_transit_home
+
+    %% Aligns nodes prettier
+    user_local_files ~~~ login_node
+
+

Overview of file transfer on Transit. The purple nodes are about file transfer, the blue nodes are about 'doing other things'. The user can be either inside or outside SUNET.

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/transit_file_transfer_using_winscp/index.html b/software/transit_file_transfer_using_winscp/index.html new file mode 100644 index 000000000..b1c067016 --- /dev/null +++ b/software/transit_file_transfer_using_winscp/index.html @@ -0,0 +1,3144 @@ + + + + + + + + + + + + + + + + + + + File transfer to/from Transit using WinSCP - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

File transfer to/from Transit using WinSCP

+

There are multiple ways to transfer files to/from Transit using a graphical tool.

+

Here it is shown how to transfer files using a graphical tool called WinSCP.

+
+What is Transit? +

See the page about the UPPMAX Transit server.

+
+
+What are the other ways? +

Other ways to transfer data to/from Transit are described here

+
+

Procedure

+

WinSCP is a secure file transfer tool that works under Windows.

+

To transfer files to/from Transit using WinSCP, do:

+
    +
  • Start WinSCP
  • +
  • Create a new site
  • +
  • For that site, use all standards, except:
      +
    • Set file protocol to 'SFTP'
    • +
    • Set host name to transit.uppmax.uu.se
    • +
    • Set user name to [username], e.g. sven
    • +
    +
  • +
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/uquota/index.html b/software/uquota/index.html new file mode 100644 index 000000000..98afe22f8 --- /dev/null +++ b/software/uquota/index.html @@ -0,0 +1,3196 @@ + + + + + + + + + + + + + + + + + + + uquota - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

uquota

+

uquota is an UPPMAX tool to determine how much storage space +is left in all projects.

+

See the help file:

+
uquota --help
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham1 ~]$ uquota --help
+usage: uquota [-h] [-q] [-d] [-u USER] [-p PROJECTS_FILE] [--include-expired]
+              [--random-usage] [--only-expired] [--sort-by-col SORT_BY_COL]
+              [-s] [-f]
+
+optional arguments:
+  -h, --help            Ask for help
+  -q, --quiet           Quiet, abbreviated output
+  -d, --debug           Include debug output
+  -u USER, --user USER
+  -p PROJECTS_FILE, --projects-file PROJECTS_FILE
+  --include-expired     Include expired projects
+  --random-usage        removed option, don't use
+  --only-expired        Only show expired projects
+  --sort-by-col SORT_BY_COL
+                        Index (0-4) of column to sort by. Default is 0.
+  -s, --slow            Deprecated. Previously ran 'du' command
+  -f, --files           Reports on number of files. Only for home directories
+
+
+

Usage:

+
uquota
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham3 ~]$ uquota
+Your project     Your File Area       Unit        Usage  Quota Limit  Over Quota
+---------------  -------------------  -------  --------  -----------  ----------
+home             /home/sven           GiB          24.7           32
+home             /home/sven           files       79180       300000
+naiss2024-22-49  /proj/worldpeace     GiB           5.1          128
+naiss2024-22-49  /proj/worldpeace     files       20276       100000
+
+
+

If you find out that your home folder is full, +but do not know which folder takes up most space, +use the command below to find it:

+
du --human --max-depth 1 .
+
+
+What does that look like? +

Your output will be similar to this:

+
[sven@rackham2 ~]$ du --human --max-depth 1 .
+28K ./bin
+52M ./.config
+8.0K ./glob
+1.5G ./users
+484K ./.ssh
+9.7M ./.lmod.d
+514M ./.gradle
+4.0K ./.oracle_jre_usage
+84K ./.pki
+3.2G ./.singularity
+4.0K ./.git-credential-cache
+8.0K ./.keras
+6.1G ./.cache
+344M ./R
+740K ./.local
+8.0K ./.nv
+32M ./.nextflow
+88K ./.r
+140K ./.dbus
+48K ./.subversion
+8.0K ./.gnupg
+480K ./.java
+8.0K ./.vscode-oss
+29M ./.mozilla
+41M ./private
+64K ./.ipython
+8.0K ./.rstudio-desktop
+4.0K ./.allinea
+8.8M ./.beast
+688K ./.gstreamer-0.10
+8.4G ./.apptainer
+4.0K ./my_best_folder
+3.7G ./GitHubs
+260K ./.kde
+24K ./.jupyter
+849M ./.conda
+4.7M ./lib
+176M ./.vscode-server
+16K ./.MathWorks
+8.2M ./.matlab
+25G .
+
+
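To see the largest folders at the bottom of the output, right above your prompt, you can sort it; the -h flag makes sort understand the human-readable sizes:

du --human --max-depth 1 . | sort -h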
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/valgrind/index.html b/software/valgrind/index.html new file mode 100644 index 000000000..3f41f3506 --- /dev/null +++ b/software/valgrind/index.html @@ -0,0 +1,3156 @@ + + + + + + + + + + + + + + + + + + + Valgrind - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Valgrind

+

There are multiple profilers +available on UPPMAX. +This page describes Valgrind.

+

Valgrind is a suite of simulation-based debugging and profiling tools for programs.

+

Valgrind contains several tools:

+
    +
  • memcheck, for detecting memory-management problems in your program
  • +
  • cachegrind, for cache profiling
  • +
  • helgrind, finds data races in multithreaded programs
  • +
  • callgrind, a call graph profiler
  • +
  • drd, a thread error detector
  • +
  • massif, a heap profiler
  • +
  • ptrcheck, a pointer checking tool
  • +
  • lackey, a simple profiler and memory tracer
  • +
+

Valgrind works best with the GCC and Intel compilers.

+

There is a system valgrind-3.15.0 from 2020.

+

First load compiler:

+
module load gcc
+
+

or

+
module load intel
+
+

then you can use valgrind by:

+
valgrind [options] ./your-program [your programs options]
+
+
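For example, to run the default memcheck tool with full details on memory leaks (your-program is a placeholder):

valgrind --leak-check=full ./your-program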

How to use valgrind with MPI programs

+

Load your compiler, openmpi and the valgrind module as before:

+
module load gcc/10.3.0 openmpi/3.1.6
+
+

or

+
module load intel/20.4 openmpi/3.1.6
+
+

As of now, Valgrind seems to be incompatible with openmpi/4.X.X.

+

Then run:

+
export LD_PRELOAD=$VALGRIND_MPI_WRAPPER
+mpirun -np 2 valgrind ./your-program
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vartrix/index.html b/software/vartrix/index.html new file mode 100644 index 000000000..c9dbe4804 --- /dev/null +++ b/software/vartrix/index.html @@ -0,0 +1,3168 @@ + + + + + + + + + + + + + + + + + + + VarTrix - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

VarTrix

+

VarTrix is 'a software tool for extracting single cell variant information +from 10x Genomics single cell data' (as quoted from the VarTrix repository).

+

To use VarTrix on an UPPMAX cluster, do

+
module load bioinfo-tools
+
+

After this, search for the module of your favorite VarTrix version, using:

+
module spider vartrix
+
+
+What does that look like? +

The output will look similar to:

+
[sven@rackham3 vartrix]$ module spider vartrix
+
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+  vartrix: vartrix/1.1.22
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+
+    You will need to load all module(s) on any one of the lines below before the "vartrix/1.1.22" module is available to load.
+
+      bioinfo-tools
+
+    Help:
+      vartrix - use vartrix 
+
+      Description
+
+      Single-Cell Genotyping Tool
+
+      Version 1.1.22
+
+      https://github.com/10XGenomics/vartrix
+
+      Usage:
+
+          Example:
+
+          vartrix --bam $VARTRIX_TEST/test_dna.bam \
+                  --cell-barcodes $VARTRIX_TEST/dna_barcodes.tsv \
+                  --fasta $VARTRIX_TEST/test_dna.fa  \
+                  --vcf $VARTRIX_TEST/test_dna.vcf
+
+
+

Then load your favorite version, for example:

+
module load vartrix/1.1.22
+
+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/venv_on_rackham/index.html b/software/venv_on_rackham/index.html new file mode 100644 index 000000000..b6b9a99ce --- /dev/null +++ b/software/venv_on_rackham/index.html @@ -0,0 +1,3106 @@ + + + + + + + + + + + + + + + + + + + venv on Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

venv on Rackham

+
+Want to see a video? +
+

You can find the video 'How to use a Python venv on the Rackham UPPMAX cluster' + here

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vim/index.html b/software/vim/index.html new file mode 100644 index 000000000..b9e0c6624 --- /dev/null +++ b/software/vim/index.html @@ -0,0 +1,3114 @@ + + + + + + + + + + + + + + + + + + + vim - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

vim

+

UPPMAX has multiple text editors available. +This page describes the vim text editor.

+

vim is an advanced terminal editor that is fast and powerful, once you learn it.

+

Start vim on a terminal with:

+
vi
+
+

Then:
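A few essential commands to get going (standard vim behavior, not specific to UPPMAX):

```text
i      enter insert mode, to start typing text
Esc    leave insert mode
:w     save ('write') the file
:q     quit vim
:wq    save and quit
:q!    quit without saving
```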

+ + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscode/index.html b/software/vscode/index.html new file mode 100644 index 000000000..e95dd5002 --- /dev/null +++ b/software/vscode/index.html @@ -0,0 +1,3163 @@ + + + + + + + + + + + + + + + + + + + + + + + VSCode - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

VSCode

+

Visual Studio Code ('VSCode') is an IDE +that can be used for software development in many languages.

+

VSCode from a local computer working on Rackham.

+
+

VSCode from a local computer working on Rackham.

+
+

Whether you can use VSCode depends on the HPC cluster:

+ + + + + + + + + + + + + + + + + + + + +
ClusterWorks/failsDocumentation page
BiancaFails [1]VSCode on Bianca
RackhamWorksVSCode on Rackham
+ + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscode_on_bianca/index.html b/software/vscode_on_bianca/index.html new file mode 100644 index 000000000..e062e7372 --- /dev/null +++ b/software/vscode_on_bianca/index.html @@ -0,0 +1,3162 @@ + + + + + + + + + + + + + + + + + + + Using Visual Studio Code on Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using Visual Studio Code on Bianca

+
+

VSCode fails, use VSCodium instead

+

The approach below will fail +(note that using VSCode on Rackham does work).

+

Instead, go to the page Using VSCodium on Bianca

+
+

Introduction

+

There are multiple IDEs on Bianca, +among them VSCodium. +Here we show that running VSCode +on Bianca will fail.

+

Visual Studio Code ('VSCode') is an IDE +that can be used for software development in many languages.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show what happens when you try to use VSCode on Bianca.

+

Procedure to start VSCode

+

1. Install VSCode on your local computer

+

2. Start VSCode on your local computer

+

Start VSCode on your local computer

+

3. In VSCode, install the VSCode 'Remote Tunnels' plugin

+

In VSCode, install the VSCode 'Remote Tunnels' plugin

+

In VSCode, the VSCode 'Remote Tunnels' plugin is installed

+

4. In VSCode, connect to Bianca

+

In VSCode, at the 'Remote Explorer' tab, click on 'SSH', +then on 'New Remote'.

+

In VSCode, connect to Bianca

+
+

This is the step that fails

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscode_on_rackham/index.html b/software/vscode_on_rackham/index.html new file mode 100644 index 000000000..6c50ce9f7 --- /dev/null +++ b/software/vscode_on_rackham/index.html @@ -0,0 +1,3235 @@ + + + + + + + + + + + + + + + + + + + Connecting Visual Studio Code to Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Connecting Visual Studio Code to Rackham

+

VSCode from a local computer working on Rackham.

+
+

VSCode from a local computer working on Rackham.

+
+

Introduction

+

Visual Studio Code ('VSCode') is an IDE +that can be used for software development in many languages.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show how to connect VSCode on your local computer +to work with your files on Rackham.

+

Procedure

+

Below is a step-by-step procedure to start VSCode.

+
+Prefer a video? +

See this YouTube video.

+

An older version of this procedure, where the 'Remote Tunnel' +extension is used, can be seen in this YouTube video.

+
+

1. Install VSCode on your local computer

+

Install VSCode on your local computer.

+

2. Start VSCode on your local computer

+
+What does that look like? +

Start VSCode on your local computer

+
+

3. In VSCode, install the VSCode 'Remote-SSH' plugin

+

In VSCode, install the VSCode 'Remote-SSH' plugin.

+
+What does that look like? +

Install the VSCode 'Remote-SSH' plugin

+
+

4. In the 'Remote Explorer' tab, at SSH, click the plus

+

In VSCode, go to the 'Remote Explorer' tab. +At the SSH section, click on the '+' (with tooltip 'New remote').

+
+What does that look like? +

Click on the plus

+
+

5. Give the SSH command to connect to Rackham

+

In the main edit bar, give the SSH command to connect to Rackham, +e.g. ssh sven@rackham.uppmax.uu.se

+
+What does that look like? +

Type the SSH command

+
+

6. Pick a location for the SSH config file

+

In the dropdown menu, pick a location for the SSH config file, +e.g. the first, which is similar to /home/sven/.ssh/config.

+
+What does that look like? +

Type a location for an SSH config file

+
+

7. Click 'Connect'

+

In the bottom left of VSCode, click on the popup window 'Connect'.

+
+What does that look like? +
+

Click on 'Connect'

+

8. Done

+

You are now connected: there is a new window with VSCode connected to Rackham.

+
+What does that look like? +

The window that is connected to a Rackham home folder:

+

Connected to Rackham in subwindow

+

Going to /proj/staff:

+

Connected to Rackham's project folder

+
+

Setting up VSCode for Rackham and Snowy

+
+

Info

+
    +
  • You can run VSCode on your local computer and still be able to work with modules loaded or environments created on Rackham.
  • +
  • Similarly, it is possible to take advantage of Snowy GPUs while developing on your local computer.
  • +
+
+

1. Connect your local VSCode to VSCode server running on Rackham

+

Perform the steps mentioned under the section Procedure above to start VSCode.

+

When you first establish the SSH connection to Rackham, your VSCode server directory .vscode-server will be created in your home folder /home/[username].

+

2. Install and manage Extensions on remote VSCode server

+

By default, all VSCode extensions get installed in your home folder /home/[username]. Due to the limited storage quota of the home folder (32 GB, 300k files), it can quickly fill up with extensions and other files. The default installation path for VSCode extensions can, however, be changed to your project folder, which has far more storage space and file count capacity (1 TB, 1M files).

+

2.1. Manage Extensions

+

Go to the Command Palette (Ctrl+Shift+P or F1). Search for Remote-SSH: Settings and then go to Remote.SSH: Server Install Path. Add Item as the remote host rackham.uppmax.uu.se and Value as the folder in which you want to install all your data and extensions, e.g. /proj/uppmax202x-x-xx/nobackup (without a trailing slash /).

+

If you already had your VSCode server running and storing extensions in your home directory, make sure to kill the server by selecting Remote-SSH: Kill VS Code Server on Host in the Command Palette and then delete the .vscode-server directory in your home folder.

+

2.2. Install Extensions

+

You can sync all your local VSCode extensions to the remote server after you are connected to the VSCode server on Rackham by searching for Remote: Install Local Extensions in 'SSH: rackham.uppmax.uu.se' in the Command Palette. Alternatively, you can go to the Extensions tab and select each one individually.

+

2.3. Selecting Kernels

+

Request an allocation on either a Rackham or a Snowy compute node, depending on your need; for that, use the interactive Slurm command. Load the module on Rackham/Snowy that contains the interpreter you want in your VSCode. For example, in case you need ML packages and a Python interpreter, do module load python_ML_packages. Check the file path of the Python interpreter with which python and copy this path. Go to the Command Palette (Ctrl+Shift+P or F1) in your local VSCode. Search for "interpreter" for Python, then paste the path of your interpreter/kernel.

+

venv or conda environments are also visible in VSCode when you select an interpreter/kernel for python or the jupyter server. For jupyter, you need to start the server first; see Point 3.

+

3. Working with a jupyter server on Rackham and Snowy

+

Rackham

+

Load the jupyter packages with either module load python or module load python_ML_packages, as per your needs. For heavy compute and a longer-running jupyter server, allocate a Rackham compute node instead of using the login node. Either request a Rackham compute node by using, for example, interactive -A uppmax202x-x-xx -p node -N 1 -t 2:00:00, or move to the next step to run jupyter on the login node itself. Start the jupyter server with jupyter notebook --ip 0.0.0.0 --no-browser. Copy the jupyter server URL, which looks something like http://r52.uppmax.uu.se:8888/tree?token=xxx, click on Select Kernel in VSCode and select Existing Jupyter Server. Paste the URL here and confirm your choice.

+
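A condensed sketch of the Rackham steps above (the project name is a placeholder and the module choice depends on your needs):

```bash
interactive -A uppmax202x-x-xx -p node -N 1 -t 2:00:00   # optional: use a compute node
module load python_ML_packages                           # or: module load python
jupyter notebook --ip 0.0.0.0 --no-browser               # copy the printed URL into VSCode
```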

Snowy

+

Start an interactive session with GPU allocation on Snowy: interactive -A uppmax202x-x-xx -p node -N 1 -t 02:00:00 --gres=gpu:1 -M snowy. Load the jupyter packages with module load python_ML_packages and start the jupyter server with jupyter notebook --ip 0.0.0.0 --no-browser. This should start a jupyter server on a Snowy compute node with one T4 GPU. Copy the URL of the running jupyter server, which looks something like http://s193.uppmax.uu.se:8888/tree?token=xxx, and paste it in the jupyter kernel path in your local VSCode. The application will automatically perform port forwarding to Rackham, which already listens to Snowy compute nodes over certain ports.
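A condensed sketch of the Snowy steps above (the project name is a placeholder):

```bash
interactive -A uppmax202x-x-xx -p node -N 1 -t 02:00:00 --gres=gpu:1 -M snowy
module load python_ML_packages
jupyter notebook --ip 0.0.0.0 --no-browser   # copy the printed URL into VSCode
```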

+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscodium/index.html b/software/vscodium/index.html new file mode 100644 index 000000000..f97be8952 --- /dev/null +++ b/software/vscodium/index.html @@ -0,0 +1,3166 @@ + + + + + + + + + + + + + + + + + + + + + + + VSCodium - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscodium_on_bianca/index.html b/software/vscodium_on_bianca/index.html new file mode 100644 index 000000000..4658403e5 --- /dev/null +++ b/software/vscodium_on_bianca/index.html @@ -0,0 +1,3192 @@ + + + + + + + + + + + + + + + + + + + Using VSCodium on Bianca - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using VSCodium on Bianca

+

VSCodium running on Bianca

+
+

VSCodium running on Bianca

+
+

Introduction

+

There are multiple IDEs on Bianca, +among them VSCodium. +Here we discuss how to run VSCodium +on Bianca.

+

VSCodium is the community edition of Visual Studio Code +and can be used for software development in many languages.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show how to use VSCodium on Bianca, +using Bianca's remote desktop environment.

+
+Forgot how to login to a remote desktop environment? +

See the 'Logging in to Bianca' page.

+
+

As VSCodium is a resource-heavy program, +it must be run on an interactive node.

+

Procedure to start VSCodium

+

Below is a step-by-step procedure to start VSCodium. +This procedure is also demonstrated in this YouTube video.

+

1. Get within SUNET

+
+Forgot how to get within SUNET? +

See the 'get inside the university networks' page here

+
+

2. Start the Bianca remote desktop environment

+
+Forgot how to start Bianca's remote desktop environment? +

See the 'Logging in to Bianca' page.

+
+

3. Start an interactive session

+

Within the Bianca remote desktop environment, start a terminal. +Within that terminal, start an interactive session with 1 core.

+
+Forgot how to start an interactive node? +

See the 'Starting an interactive node' page.

+

Spoiler: use:

+
interactive -A sens2023598 -n 1 -t 8:00:00
+
+
+

4. Load the modules needed

+

VSCodium needs the VSCodium/latest module.

+

In the terminal of the interactive session, do:

+
module load VSCodium/latest
+
+

5. Start VSCodium

+

With the modules loaded, +in that same terminal, +start VSCodium:

+
code
+
+

VSCodium starts up quickly.
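For reference, the whole terminal session condensed (the project name is the example used above; use your own):

```bash
interactive -A sens2023598 -n 1 -t 8:00:00
module load VSCodium/latest
code
```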

+
+How does VSCodium look on Bianca? +

VSCodium running on Bianca

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/vscodium_on_rackham/index.html b/software/vscodium_on_rackham/index.html new file mode 100644 index 000000000..a97dfd7c2 --- /dev/null +++ b/software/vscodium_on_rackham/index.html @@ -0,0 +1,3187 @@ + + + + + + + + + + + + + + + + + + + Using VSCodium on Rackham - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Using VSCodium on Rackham

+

VSCodium on another cluster, as VSCodium on Rackham fails

+
+

VSCodium on another cluster, as VSCodium on Rackham fails

+
+
+

VSCodium fails, use VSCode instead

+

The approach below will fail +(note that using VSCodium on Bianca does work).

+

Instead, go to the page Using VSCode on Rackham

+
+

Introduction

+

VSCodium is the community edition of Visual Studio Code +and can be used for software development in many languages.

+
+What is an IDE? +

See the page on IDEs.

+
+

In this session, we show how to use VSCodium on Rackham, +using Rackham's remote desktop environment.

+
+Forgot how to login to a remote desktop environment? +

See the 'Logging in to Rackham' page.

+
+

As VSCodium is a resource-heavy program, +it must be run on an interactive node.

+

Procedure to start VSCodium

+

1. Start the Rackham remote desktop environment

+
+Forgot how to start Rackham's remote desktop environment? +

See the 'Logging in to Rackham' page.

+
+

2. Start an interactive session

+

Within the Rackham remote desktop environment, start a terminal. +Within that terminal, start an interactive session with 1 core.

+
+Forgot how to start an interactive node? +

See the 'Starting an interactive node' page.

+

Spoiler: use:

+
interactive -A uppmax2023-2-25
+
+
+

3. Load the modules needed

+

VSCodium needs the VSCodium/latest module.

+

In the terminal of the interactive session, do:

+
module load VSCodium/latest
+
+

4. Start VSCodium

+

With the modules loaded, +in that same terminal, +start VSCodium:

+
code
+
+

VSCodium will give an error:

+
+How does the VSCodium error look on Rackham? +

VSCodium on Rackham fails

+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/whisper/index.html b/software/whisper/index.html new file mode 100644 index 000000000..a2a013d07 --- /dev/null +++ b/software/whisper/index.html @@ -0,0 +1,3608 @@ + + + + + + + + + + + + + + + + + + + + + + + Whisper - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Whisper

+

Introduction

+

This guide provides instructions for loading and using OpenAI's Whisper, an +automatic speech recognition system. Whisper is available on Bianca. It can either be used through a User Interface or loaded as a Module.

+
+

AI tool caution

+

Like all other AI models, Whisper too hallucinates while transcribing or translating, i.e. it may "make up" words or even sentences, resulting in misinterpretation or misrepresentation of the speaker.

+
+
+Quality of transcriptions/ translations and audio formats +

Transcriptions (error rate): Swedish: ~10%, English: ~5%, English with a heavy accent: ~20%

+

Translations: Any to English: "DeepL"-level performance, slightly better than Google Translate.

+

Supported file types: mp3, mp4, mpeg, mpga, m4a, wav, webm and wma.

+

Quality as a factor of duration of recordings:
+A few minutes: Excellent
+A few minutes to an hour: Excellent at the beginning, then deteriorates. +An hour or more: Excellent at the beginning, then deteriorates.

+

Quality as a factor of noise and count of speakers:
+2 speakers: Excellent
+Background noise: Good
+2+ speakers: Very Good
+Conversational overlap: Average. Difficulty disambiguating speakers.
+Long silences: Good. Might repeat sentences and get stuck in a loop.

+

Whisper also tries to give separate sentences to different speakers, but this is not guaranteed.

+
+

Recordings from Dictaphone

+

If you record using a dictaphone such as the Olympus DS-9000, it will by default record in the .DS or .DS2 file formats, which are NOT supported by Whisper.
+Make sure to change the settings on the dictaphone to the .mp3 format before you start recording.
+Follow this guide to convert your .DS or .DS2 recording to .mp3 using the software that comes with your dictaphone. Alternatively, you can also download the software from here and then follow the same guide.

+
+
+

Glossary

+

SUPR account: Gives access to a project management account for submitting project proposals on SUPR.
+UPPMAX account: Gives access to UPPMAX servers, like Bianca.
+GUI: Graphical User Interface for taking transcription/translation inputs.
+WinSCP / FileZilla: user interfaces to send data from your computer to Bianca and vice versa.
+Terminal: the black, text-based environment that is used for performing jobs.
+Wharf: a private folder on Bianca that is used to transfer data to and from your computer.
+Proj: the project folder on Bianca that is shared among all project members.
+Job: a request for transcribing/translating one or many recordings.
+Slurm: the "job" handler.

+
+

Checklist for new project

+
    +
  • SUPR account
  • +
  • Submit project proposal
  • +
  • UPPMAX username and password
  • +
  • UPPMAX two factor authentication.
  • +
+
+
+

Checklist for existing project

+
    +
  • SUPR account
  • +
  • Submit project proposal
  • +
  • UPPMAX username and password
  • +
  • UPPMAX two factor authentication.
  • +
+
+


Accessing your project

+

The following steps are derived from UPPMAX User Accounts:

+
    +
  1. +

    Register an account on SUPR.

    +
  2. +
  3. +

    Apply for a project for sensitive data at Bianca.

    +
  4. +
  5. +

    Give adequate information while creating your proposal by following this template.

    +
  6. +
  7. +

    Register an account for UPPMAX at SUPR by clicking "Request Account at UPPMAX" button. You will receive an UPPMAX username and password via email.

    +
  8. +
  9. +

    Setup two factor authentication for this newly created UPPMAX account.

    +
  10. +
  11. +

    Check access to your project on Bianca.

    +
  12. +
+

Whisper App

+

Step 1: Data transfer from local computer to Bianca

+
    +
  1. Transfer your data from your local computer to Wharf using the WinSCP client (Windows only) or the FileZilla client (Mac, Windows or Linux). Instructions on how to do this are in their respective links.
  2. +
+

Step 2: Transcribing/Translating

+
    +
  1. +

    Log in to Bianca. This requires your UPPMAX username (visible in SUPR), the project name and a two factor authentication code. Make sure you are inside SUNET for the link to work.

    +
  2. +
  3. +

    Click on the Terminal icon on the bottom of the Desktop and enter the following command in it to load Whisper GUI.

    +
    module load Whisper-gui
    +
    +

    Terminal on Bianca Desktop

    +
  4. +
  5. +

    You should now see the proj and wharf folders on your Desktop, along with a Whisper application icon. wharf contains the data that was transferred in Step 1.
    +(The next time you log in to Bianca to transcribe/translate, you can start from this step and skip the previous one, since the wharf and proj folders are already created.)

    +

    Desktop view on Bianca after running module load Whisper-gui

    +
  6. +
  7. +

    Open the wharf and proj folders. Select all the data that you transferred to wharf, then drag and drop it into the proj folder.
    +NOTE: dragging and dropping will cut-paste your data instead of copy-pasting it. Do not keep files in wharf for a long period, as this folder is connected to the outside world and hence is a security risk. proj, on the other hand, is safe to keep data in, as it is cut off from the internet, so move your data there.

    +

    whisper gui

    +
  8. +
  9. +

    Click on Whisper application on Desktop. It would look like this:
    +whisper gui

    +
  10. +
  11. +

    Select appropriate options, or use the following for the best results:

    +

    Total audio length in hours: [give a rough average if transcribing files in bulk, rounding up to nearest hour]
    +Model: large-v2
    +Language used in recordings (leave blank for autodetection): If your language of choice is unavailable, check the "Languages available" list for its availability and contact support.
    +Initial Prompt: [leave blank]

    +
  12. +
+

Step 3: Monitoring jobs

+
    +
  1. +

    Your job will first wait in a queue and then start executing. To check whether your job is waiting in the queue, type squeue --me -o "%.30j" in the terminal. If you see your job name Whisper_xxx, it means it is in the queue, where xxx is the date and time of job submission, example: Whisper_2024-10-25_11-10-30.

    +
  2. +
  3. +

    To check if your job has started executing, locate a file named [Whisper_xxx_yyy].out that will get created in Whisper_logs folder inside proj folder, where xxx is date and time of job submission and yyy is your username followed by a "job id", example: Whisper_2024-10-25_11-10-30_jayan_234.out. This contains a progress bar for each recording that you sent for transcribing/translating.

    +
  4. +
  5. +

    If neither the job name Whisper_xxx was found in the queue, nor a [Whisper_xxx_yyy].out file was created in Whisper_logs, contact support.

    +
  6. +
+
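A condensed sketch of the monitoring commands above (the log path is illustrative; it depends on where your proj folder is mounted in your terminal session):

```bash
squeue --me -o "%.30j"                 # is my Whisper_xxx job still in the queue?
ls Whisper_logs/                       # inside proj: has a .out log file appeared?
tail -f Whisper_logs/Whisper_*.out     # follow the transcription progress bars
```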

Step 4: Data transfer from project to local computer

+
    +
  1. +

    Drag and drop your transcriptions/translations from proj folder to wharf.

    +
  2. +
  3. +

    Use WinSCP/FileZilla like you did in Step 1 and transfer your data from wharf to your local computer.

    +
  4. +
+

Output files

+

By default you receive 5 types of output files for each file you transcribe/translate:
+With timestamps: .srt, .vtt, .tsv
+Without timestamps: .txt
+With detailed model metadata: .json.
+The most popular ones are .srt and .txt formats.

+

On Mac, .srt and .vtt files can be opened in Word as follows:
+Tap with two fingers. Select Encoding as "Unicode (UTF-8)". Rename the file to something like some_name.docx, changing the type of the file to .docx. Open the file and then Save As a new file.
+Mac setting for UTF-8 export

+
+Advance settings +

Use the features below only if the transcriptions/translations are not satisfactory, e.g. for less widely spoken languages or languages without good online resources:

+
    +
  1. +

    When asked for an Initial Prompt, provide a list of comma-separated words or sentences (less than 80 words) that describe what the recording is about, or the words used by the speaker in the recording. It should be written in the same language as the one spoken in the recordings.

    +
  2. +
  3. +

    Try switching to Model: large-v3.

    +
  4. +
  5. +

    Use a combination of both 1 and 2.

    +
  6. +
  7. +

    If you are sure about the language used in the recording, use the dropdown menu and select the appropriate language.

    +
  8. +
+
+
+Languages available +

The following languages are available for transcribing. If your language of choice does not appear in the Whisper application but is listed here, contact support:

+

en: "english", +zh: "chinese", +de: "german", +es: "spanish", +ru: "russian", +ko: "korean", +fr: "french", +ja: "japanese", +pt: "portuguese", +tr: "turkish", +pl: "polish", +ca: "catalan", +nl: "dutch", +ar: "arabic", +sv: "swedish", +it: "italian", +id: "indonesian", +hi: "hindi", +fi: "finnish", +vi: "vietnamese", +he: "hebrew", +uk: "ukrainian", +el: "greek", +ms: "malay", +cs: "czech", +ro: "romanian", +da: "danish", +hu: "hungarian", +ta: "tamil", +no: "norwegian", +th: "thai", +ur: "urdu", +hr: "croatian", +bg: "bulgarian", +lt: "lithuanian", +la: "latin", +mi: "maori", +ml: "malayalam", +cy: "welsh", +sk: "slovak", +te: "telugu", +fa: "persian", +lv: "latvian", +bn: "bengali", +sr: "serbian", +az: "azerbaijani", +sl: "slovenian", +kn: "kannada", +et: "estonian", +mk: "macedonian", +br: "breton", +eu: "basque", +is: "icelandic", +hy: "armenian", +ne: "nepali", +mn: "mongolian", +bs: "bosnian", +kk: "kazakh", +sq: "albanian", +sw: "swahili", +gl: "galician", +mr: "marathi", +pa: "punjabi", +si: "sinhala", +km: "khmer", +sn: "shona", +yo: "yoruba", +so: "somali", +af: "afrikaans", +oc: "occitan", +ka: "georgian", +be: "belarusian", +tg: "tajik", +sd: "sindhi", +gu: "gujarati", +am: "amharic", +yi: "yiddish", +lo: "lao", +uz: "uzbek", +fo: "faroese", +ht: "haitian creole", +ps: "pashto", +tk: "turkmen", +nn: "nynorsk", +mt: "maltese", +sa: "sanskrit", +lb: "luxembourgish", +my: "myanmar", +bo: "tibetan", +tl: "tagalog", +mg: "malagasy", +as: "assamese", +tt: "tatar", +haw: "hawaiian", +ln: "lingala", +ha: "hausa", +ba: "bashkir", +jw: "javanese", +su: "sundanese", +yue: "cantonese"

+
+

Proposal template

+

Under the Basic Information section on NAISS SUPR, provide the following compulsory details pertaining to your project in the following fashion:

+
    +
  • +

    Project Title : Whisper service for [Name of the project]

    +
  • +
  • +

    Abstract: [What is the project about, give links, funding info, duration etc.]

    +
  • +
  • +

    Resource Usage: [Explain where transcriptions/translations are needed like interview recordings on device/ zoom or other forms of audio/video recordings from offline/online sources. Give the average and maximum number of recordings to be transcribed/translated. Give the average and maximum size of recordings in mins/hours. Mention if it is a transcribing or translation requirement. Mention the language spoken in the recordings, if known, and a rough estimate of number of recordings for each of these languages. Ignore the "core-hours" and "hours required to analyse one sample" requirement.]

    +
  • +
  • +

    Abridged Data Management Plan: [Address all points. Mention the recording file types example: .mp3, .mp4, .wav etc.]

    +
  • +
  • +

    Primary Classification: [Either follow the Standard för svensk indelning av forskningsämnen link given or search by entering the field of research such as 'Social Work', 'Human Geography' etc. ]

    +
  • +
  • +

    Requested Duration: [Mention the duration for which Whisper service is strictly required. Mentioning more duration than actually required might reflect negatively when a new allocation is requested for the same or new project next time. It is possible to request for a shorter duration of 1 month at first and then ask for a new one once the need arises again in the future.]

    +
  • +
+
+Module Loading +

To load the Whisper module, run the following command:

+
[jayan@sens2024544-bianca jayan]$ module load Whisper
+
+

This will also load the necessary dependencies, including python +and ffmpeg.

+
[jayan@sens2024544-bianca jayan]$ module list
+Currently Loaded Modules:
+1) uppmax   2) python/3.11.4   3) FFmpeg/5.1.2   4) Whisper/20240930
+
+

Command-line

+

The whisper command can be used to transcribe audio files. For example:

+
[jayan@sens2024544-bianca jayan]$ whisper audio.flac audio.mp3 audio.wav --model medium
+
+

For more ways to run whisper, for example on a CPU node or to do translations, check the available flags by running: whisper --help +You can also check the source code with its arguments here on the official GitHub repository.

+
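For example, a translation run could look like this (a sketch; --task and --language are standard Whisper command-line flags, and the file name is a placeholder):

```bash
# translate a Swedish recording to English with the large-v2 model
whisper recording.mp3 --model large-v2 --task translate --language sv
```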

Python

+
example.py
import whisper
+
+# Load the model
+model = whisper.load_model("base")
+
+# Transcribe an audio file
+result = model.transcribe("/path/to/audiofile.mp3")
+
+# Output the transcription
+print(result["text"])
+
+

Available Models

+

To make offline usage of Whisper more convenient, we provide +pre-trained models as part of the Whisper module. You can list +all the available models with:

+
[jayan@sens2024544-bianca jayan]$ ll /sw/apps/Whisper/0.5.1/rackham/models
+total 13457440
+-rw-rw-r-- 1 sw  145261783 Nov 10 14:22 base.en.pt
+-rw-rw-r-- 1 sw  145262807 Nov 10 14:23 base.pt
+-rw-rw-r-- 1 sw 3086999982 Nov 10 14:39 large-v1.pt
+-rw-rw-r-- 1 sw 3086999982 Nov 10 14:40 large-v2.pt
+-rw-rw-r-- 1 sw 3087371615 Nov 10 14:27 large-v3.pt
+-rw-rw-r-- 1 sw 1528006491 Nov 10 14:24 medium.en.pt
+-rw-rw-r-- 1 sw 1528008539 Nov 10 14:25 medium.pt
+-rw-rw-r-- 1 sw  483615683 Nov 10 14:23 small.en.pt
+-rw-rw-r-- 1 sw  483617219 Nov 10 14:23 small.pt
+-rw-rw-r-- 1 sw   75571315 Nov 10 14:22 tiny.en.pt
+-rw-rw-r-- 1 sw   75572083 Nov 10 14:22 tiny.pt
+
+
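To use one of these bundled models instead of downloading one, you can point the command-line tool at this directory (a sketch; --model_dir is a standard Whisper flag, and the file name is a placeholder):

```bash
whisper recording.mp3 --model large-v2 --model_dir /sw/apps/Whisper/0.5.1/rackham/models
```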
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/winscp/index.html b/software/winscp/index.html new file mode 100644 index 000000000..660a813bd --- /dev/null +++ b/software/winscp/index.html @@ -0,0 +1,3110 @@ + + + + + + + + + + + + + + + + + + + WinSCP - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/wrf.html b/software/wrf.html new file mode 100644 index 000000000..ca57d5194 --- /dev/null +++ b/software/wrf.html @@ -0,0 +1,298 @@ + +

Introduction

+ +

The Weather Research and Forecasting (WRF) Model is a next-generation mesoscale numerical weather prediction system designed to serve both operational forecasting and atmospheric research needs.

+ +

Model home page

+ +

ARW branch page

+ +

WRF Preprocessing System (WPS).

+ +

WRF is installed as modules for version 4.1.3 and compiled with INTEL and parallelized for distributed memory (dmpar) or hybrid shared and distributed memory (sm+dm). These are available as:

+ +
    +
  • WRF/4.1.3-dmpar     default as WRF/4.1.3
  • +
  • WRF/4.1.3-dm+sm    
  • +
+ +

WPS is installed as version 4.1 and available as:

+ +
    +
  • WPS/4.1
  • +
+ +

There are WPS_GEOG data available.
+Set the path in namelist.wps to:
+
+geog_data_path = '/sw/data/WPS-geog/4/rackham/WPS_GEOG'

+ +

Corine and metria data are included in the WPS_GEOG directory.
+In /sw/data/WPS-geog/4/rackham you'll find GEOGRID.TBL.ARW.corine_metria that hopefully works. Copy to your WPS/GEOGRID directory and then link to GEOGRID.TBL file.
+It may not work for a large domain. If so, either modify TBL file or use in inner domains only.

+ +

+ +

To analyse the WRF output on the cluster you can use Vapor, NCL (module called NCL-graphics) or wrf-python (module called wrf-python). For details on how, please confer the Vapor, NCL or wrf-python webpages.

+ +

Get started

+ +

This section assumes that you are already familiar with running WRF. If not, please check the tutorial, where you can at least omit the first 5 buttons and go directly to the last button, or depending on your needs, also check the “Static geography data” and “Real-time data”.

+ +

When running WRF/WPS you would like your own settings for the model to run and not to interfere with other users. Therefore, you need to set up a local or project directory (e.g. 'WRF') and work from there like for a local installation. You also need some of the content from the central installation. Follow these steps:

+ +
    +
  1. Create a directory where you plan to have your input and result files.
  2. +
  3. Standing in this directory, copy all or some of the following directories from the central installation. +
      +
    1. Run directory                           for real runs +
      +cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/run .
      + You can remove *.exe files in this run directory because the module files shall be used.
    2. +
    3. WPS directory                          if input data has to be prepared +
      +cp -r /sw/EasyBuild/rackham/software/WPS/4.1-intel-2019b-dmpar/WPS-4.1 .
      + You can remove *.exe files in the new directory because the module files shall be used.
    4. +
    5. Test directory                          for ideal runs +
      +cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/test .
      + You can remove *.exe files because the module files shall be used.
    6. +
    +
  4. +
  5. When WRF or WPS modules are loaded you can run with “ungrib.exe” or for instance “wrf.exe”, i.e. without the “./”.
  6. +
  7. Normally you can run ungrib.exe, geogrid.exe and real.exe and, if the period is not too long, metgrid.exe, on the command line or in interactive mode.
  8. +
  9. wrf.exe has to be run on the compute nodes. Make a batch script, see template below:
  10. +
+ +

+ +

+ +

+ +

+ +

+ +
+
+
+#!/bin/bash
+#SBATCH -J 
+#SBATCH --mail-user 
+#SBATCH --mail-type=ALL 
+#SBATCH -t 0-01:00:0 
+#set wall time c. 50% higher than expected 
+#SBATCH -A 
+# 
+#SBATCH -n 40 -p node 
+#this gives 40 cores on 2 nodes 
+module load WRF/4.1.3-dmpar 
+# With PMI jobs on very many nodes starts more efficiently. 
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so 
+export I_MPI_PMI2=yes 
+srun -n 40 --mpi=pmi2 wrf.exe
+ +

+
+ +

+ +

Running smpar+dmpar

+ +

WRF compiled for hybrid shared + distributed memory (OpenMP+MPI) can be more efficient than dmpar only. With good settings it runs approximately 30% faster and uses correspondingly fewer resources.

+ +

To load this module type:

+ +
+
+
+module load WRF/4.1.3-dm+sm
+ +

+
+ +

The submit script can look like this:

+ +
+
+
+#!/bin/bash
+#SBATCH -J <jobname>
+#SBATCH --mail-user <email address>
+#SBATCH --mail-type=ALL
+#SBATCH -t 0-01:00:0    #set wall time c. 50% higher than expected
+#SBATCH -A <project name>
+#
+#SBATCH -N 2  ## case with 2 nodes = 40 cores on Rackham
+#SBATCH -n 8  ## make sure that n x c = (cores per node) x N
+#SBATCH -c 5
+#SBATCH --exclusive
+# We want to run OpenMP on one unit (the cores that share a memory channel, 10 on Rackham) or a part of it.
+# So, for Rackham, choose -c to be either 10, 5 or 2.
+# c = 5 seems to be the most efficient!
+# Set flags below!
+nt=1
+if [ -n "$SLURM_CPUS_PER_TASK" ]; then
+  nt=$SLURM_CPUS_PER_TASK
+fi
+ml purge > /dev/null 2>&1 # Clean the environment
+ml WRF/4.1.3-dm+sm
+export OMP_NUM_THREADS=$nt
+export I_MPI_PIN_DOMAIN=omp
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so
+export I_MPI_PMI2=yes
+srun -n 8 --mpi=pmi2 wrf.exe
+
+ +

Local installation with module dependencies

+ +

If you would like to change in the FORTRAN code for physics or just want the latest version you can install locally but with the dependencies from the modules

+ +

Step 1: WRF Source Code Registration and Download

+ +
    +
  1. Register and download
  2. +
  3. Identify download urls you need (on Github for v4 and higher) +
      +
    1. WRF
    2. +
    3. WPS
    4. +
    5. Other?
    6. +
    +
  4. +
  5. In folder of your choice at UPPMAX: +
      +
    1. 'wget <download url>'
    2. +
    +
  6. +
  7. 'tar zxvf <file>' .
  8. +
+ +

Step 2: Configure and compile

+ +
+ +
Create and set the environment in a SOURCEME file; see the example below for an intel-dmpar build. Loading the WRF module sets most of the environment, but some variables have different names in the configure file. The examples below assume dmpar, but dmpar can be interchanged with dm+sm for a hybrid build.
+ +
+
+
+
+#!/bin/bash
+
+module load WRF/4.1.3-dmpar
+
+module list
+
+export WRF_EM_CORE=1
+
+export WRFIO_NCD_LARGE_FILE_SUPPORT=1
+
+export NETCDFPATH=$NETCDF
+
+export HDF5PATH=$HDF5_DIR
+
+export HDF5=$HDF5_DIR           
+ +

+
+
+ +
Then
+ +
+
+source SOURCEME
+
+./configure
+
+ +
+ +
Choose intel and dmpar (15) or other, depending on WRF version and parallelization.
+ +
When finished, it may complain about not finding the netcdf.inc file. This is solved below, as you have to modify the configure.wrf file.
+ +
+ +
+
• Intelmpi settings (for dmpar)
+ +
+
+DM_FC           =        mpiifort
+
+DM_CC           =        mpiicc -DMPI2_SUPPORT
+
+#DM_FC           =       mpif90 -f90=$(SFC)
+
+#DM_CC           =       mpicc -cc=$(SCC)
+
+ +
+ +
+
• Netcdf-fortran paths
+ +
+
+LIB_EXTERNAL    = add  flags "-$(NETCDFFPATH)/lib -lnetcdff -lnetcdf"  (let line end with "\")
+INCLUDE_MODULES =    add flag "-I$(NETCDFFPATH)/include" (let line end with "\")
+
+Add the line below close to  NETCDFPATH: + +
+
+NETCDFFPATH     =    $(NETCDFF)
+
+ +

+
+
+Then: + +
+
+./compile em_real
+
+ +

+When you have made modifications to the code and once configure.wrf is created, just +
+
+source SOURCEME 
+
+and run: + +
+
+./compile em_real 
+
+ +

+ +
+

Running

+ +
Batch script should include:
+ +
+
+
+module load WRF/4.1.3-dmpar
+
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so
+
+export I_MPI_PMI2=yes
+
+srun -n 40 --mpi=pmi2 ./wrf.exe     #Note ”./”, otherwise ”module version of wrf.exe” is used
+
+
+ +
+
+
+
+
+
diff --git a/software/wrf/index.html b/software/wrf/index.html new file mode 100644 index 000000000..7eb77949d --- /dev/null +++ b/software/wrf/index.html @@ -0,0 +1,3497 @@ + + + + + + + + + + + + + + + + + + + + + + + WRF - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

WRF user guide

+

Introduction

+
    +
  • +

    The Weather Research and Forecasting (WRF) Model is a next-generation mesoscale numerical weather prediction system designed to serve both operational forecasting and atmospheric research needs.

    +
  • +
  • +

    Model home page

    +
  • +
  • +

    ARW branch page

    +
  • +
  • +

    WRF Preprocessing System (WPS).

    +
  • +
  • +

    WRF is installed as modules for version 4.1.3 and compiled with INTEL and parallelized for distributed memory (dmpar) or hybrid shared and distributed memory (sm+dm). These are available as:

    +
      +
    • WRF/4.1.3-dmpar default as WRF/4.1.3
    • +
    • WRF/4.1.3-dm+sm
    • +
    • +

      WPS is installed as version 4.1 and available as:

      +
    • +
    • +

      WPS/4.1

      +
    • +
    +
  • +
  • +

    There are WPS_GEOG data available.

    +
  • +
  • Set the path in namelist.wps to:
  • +
+
geog_data_path = '/sw/data/WPS-geog/4/rackham/WPS_GEOG'
+
+
    +
  • Corine and metria data are included in the WPS_GEOG directory.
  • +
  • In /sw/data/WPS-geog/4/rackham you'll find GEOGRID.TBL.ARW.corine_metria that hopefully works. Copy to your WPS/GEOGRID directory and then link to GEOGRID.TBL file.
  • +
  • +

    It may not work for a large domain. If so, either modify TBL file or use in inner domains only.

    +
  • +
  • +

    To analyse the WRF output on the cluster you can use Vapor, NCL (module called as NCL-graphics) or wrf-python (module called as wrf-python). For details on how, please confer the web pages below:

    +
      +
    • wrf-python,
    • +
    • Vapor or
    • +
    • NCL
        +
      • is not updated anymore and the developers recommend GeoCAT which serves as an umbrella over wrf-python, among others.
      • +
      +
    • +
    +
  • +
+

Get started

+
    +
  • +

    This section assumes that you are already familiar with running WRF. If not, please check the tutorial, where you can at least omit the first 5 buttons and go directly to the last button, or depending on your needs, also check the “Static geography data” and “Real-time data”.

    +
  • +
  • +

    When running WRF/WPS you would like your own settings for the model to run and not to interfere with other users. Therefore, you need to set up a local or project directory (e.g. 'WRF') and work from there like for a local installation. You also need some of the content from the central installation. Follow these steps:

    +
  • +
  • +

    Create a directory where you plan to have your input and result files.

    +
  • +
  • +

    Standing in this directory, copy all or some of the following directories from the central installation.

    +
      +
    1. +

      Run directory for real runs

      +
        +
      • cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/run .
      • +
      • You can remove *.exe files in this run directory because the module files shall be used.
      • +
      +
    2. +
    3. +

      WPS directory if input data has to be prepared

      +
        +
      • cp -r /sw/EasyBuild/rackham/software/WPS/4.1-intel-2019b-dmpar/WPS-4.1 .
      • +
      • You can remove *.exe files in the new directory because the module files shall be used.
      • +
      +
    4. +
    5. +

      Test directory for ideal runs

      +
        +
      • cp -r /sw/EasyBuild/rackham/software/WRF/4.1.3-intel-2019b-dmpar/WRF-4.1.3/test .
      • +
      • You can remove *.exe files because the module files shall be used.
      • +
      +
    6. +
    +
  • +
  • +

    When WRF or WPS modules are loaded you can run with “ungrib.exe” or for instance “wrf.exe”, i.e. without the “./”.

    +
  • +
  • Normally you can run ungrib.exe, geogrid.exe and real.exe and, if the period is not too long, metgrid.exe, on the command line or in interactive mode.
  • +
  • wrf.exe has to be run on the compute nodes. Make a batch script, see template below:
  • +
+
#!/bin/bash
+#SBATCH -J
+#SBATCH --mail-user
+#SBATCH --mail-type=ALL
+#SBATCH -t 0-01:00:0
+#set wall time c. 50% higher than expected
+#SBATCH -A
+#
+#SBATCH -n 40 -p node
+#this gives 40 cores on 2 nodes
+module load WRF/4.1.3-dmpar
+# With PMI jobs on very many nodes starts more efficiently.
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so
+export I_MPI_PMI2=yes
+srun -n 40 --mpi=pmi2 wrf.exe
+
+

Running smpar+dmpar

+

WRF compiled for hybrid shared + distributed memory (OpenMP+MPI) can be more efficient than dmpar only. With good settings it runs approximately 30% faster and uses correspondingly fewer resources.

+

To load this module type:

+
module load WRF/4.1.3-dm+sm
+
+

The submit script can look like this:

+
#!/bin/bash -l
+#SBATCH -J <jobname>
+#SBATCH --mail-user <email address>
+#SBATCH --mail-type=ALL
+#SBATCH -t 0-01:00:0    #set wall time c. 50% higher than expected
+#SBATCH -A <project name>
+#
+#SBATCH -N 2  ## case with 2 nodes = 40 cores on Rackham
+#SBATCH -n 8  ## make sure that n x c = (cores per node) x N
+#SBATCH -c 5
+#SBATCH --exclusive
+# We want to run OpenMP on one unit (the cores that share a memory channel, 10 on Rackham) or a part of it.
+# So, for Rackham, choose -c to be either 10, 5 or 2.
+# c = 5 seems to be the most efficient!
+# Set flags below!
+nt=1
+if [ -n "$SLURM_CPUS_PER_TASK" ]; then
+  nt=$SLURM_CPUS_PER_TASK
+fi
+ml purge > /dev/null 2>&1 # Clean the environment
+ml WRF/4.1.3-dm+sm
+export OMP_NUM_THREADS=$nt
+export I_MPI_PIN_DOMAIN=omp
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so
+export I_MPI_PMI2=yes
+srun -n 8 --mpi=pmi2 wrf.exe
+
+

Local installation with module dependencies

+

If you would like to change in the FORTRAN code for physics or just want the latest version you can install locally but with the dependencies from the modules

+

Step 1: WRF Source Code Registration and Download

+
    +
  1. Register and download
  2. +
  3. +

    Identify download URLs you need (on Github for v4 and higher)

    +
      +
    1. WRF
    2. +
    3. WPS
    4. +
    5. Other?
    6. +
    +
  4. +
  5. +

    In folder of your choice at UPPMAX:

    +
      +
    1. wget <download url>
    2. +
    +
  6. +
  7. tar zxvf <file>
  8. +
+

Step 2: Configure and compile

+
    +
  • Create and set the environment in a SOURCEME file; see the example below for an intel-dmpar build.
  • +
  • Loading module WRF sets most of the environment but some variables have different names in configure file.
  • +
  • The examples below assume dmpar, but dmpar can be interchanged with dm+sm for a hybrid build.
  • +
+
#!/bin/bash
+
+module load WRF/4.1.3-dmpar
+
+module list
+
+export WRF_EM_CORE=1
+
+export WRFIO_NCD_LARGE_FILE_SUPPORT=1
+
+export NETCDFPATH=$NETCDF
+
+export HDF5PATH=$HDF5_DIR
+
+export HDF5=$HDF5_DIR
+
+
    +
  • Then
  • +
+
source SOURCEME
+./configure
+
+
    +
  • Choose intel and dmpar (15) or other, depending on WRF version and parallelization.
  • +
  • +

    When finished, it may complain about not finding the netcdf.inc file. This is solved below, as you have to modify the configure.wrf file.

    +
  • +
  • +

    Intelmpi settings (for dmpar)

    +
  • +
+
DM_FC           =        mpiifort
+
+DM_CC           =        mpiicc -DMPI2_SUPPORT
+
+#DM_FC           =       mpif90 -f90=$(SFC)
+
+#DM_CC           =       mpicc -cc=$(SCC)
+
+
    +
  • NetCDF-fortran paths
  • +
+
LIB_EXTERNAL    = add  flags "-$(NETCDFFPATH)/lib -lnetcdff -lnetcdf"  (let line end with "\")
+INCLUDE_MODULES =    add flag "-I$(NETCDFFPATH)/include" (let line end with "\")
+Add the line below close to  NETCDFPATH:
+NETCDFFPATH     =    $(NETCDFF)
+
+

Then:

+
./compile em_real
+
+

When you have made modifications to the code and once configure.wrf is created, just

+
source SOURCEME
+
+

and run:

+
./compile em_real
+
+

Running

+

Batch script should include:

+
module load WRF/4.1.3-dmpar
+
+export I_MPI_PMI_LIBRARY=/usr/lib64/libpmi2.so
+
+export I_MPI_PMI2=yes
+
+srun -n 40 --mpi=pmi2 ./wrf.exe     #Note ”./”, otherwise ”module version of wrf.exe” is used
+
+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/software/xeyes/index.html b/software/xeyes/index.html new file mode 100644 index 000000000..d272fd7fb --- /dev/null +++ b/software/xeyes/index.html @@ -0,0 +1,3142 @@ + + + + + + + + + + + + + + + + + + + xeyes - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

xeyes

+

xeyes in action

+

xeyes is a program that shows two eyes. The x in its name refers +to the X11 display server, which is one of many ways to display +graphics on screen.

+

xeyes is used mostly diagnostically, i.e. to find +out if one used SSH with X-forwarding. +When xeyes is run but does not show the eyes, it means +that SSH with X-forwarding does not work.
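For example, to test X-forwarding end to end (the username is an example used elsewhere in this documentation; -X is the standard SSH flag that requests X-forwarding):

```bash
ssh -X sven@rackham.uppmax.uu.se   # log in with X-forwarding enabled
xeyes                              # a pair of eyes should pop up
```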

+

How to run xeyes

+

In a terminal, type:

+
xeyes
+
+

If you've logged in via SSH with X-forwarding +and it works correctly, you will see this:

+

xeyes in action

+

If you've logged in without SSH with X-forwarding +or the SSH client is not setup correctly, you will see:

+

xeyes not working

+

The line that indicates the error is:

+
Error: Can't open display:
+
+ + + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/compress_fastQ/index.html b/storage/compress_fastQ/index.html new file mode 100644 index 000000000..69a69f226 --- /dev/null +++ b/storage/compress_fastQ/index.html @@ -0,0 +1,3096 @@ + + + + + + + + + + + + + + + + + + + compress fastQ - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

compress fastQ

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/compress_format/index.html b/storage/compress_format/index.html new file mode 100644 index 000000000..451c71d79 --- /dev/null +++ b/storage/compress_format/index.html @@ -0,0 +1,3096 @@ + + + + + + + + + + + + + + + + + + + Compress format - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Compress format

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/compress_guide/index.html b/storage/compress_guide/index.html new file mode 100644 index 000000000..580f1f153 --- /dev/null +++ b/storage/compress_guide/index.html @@ -0,0 +1,3096 @@ + + + + + + + + + + + + + + + + + + + Compress guide - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Compress guide

+ + + + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/storage/disk_storage_guide/index.html b/storage/disk_storage_guide/index.html new file mode 100644 index 000000000..f70c5fe28 --- /dev/null +++ b/storage/disk_storage_guide/index.html @@ -0,0 +1,3101 @@ + + + + + + + + + + + + + + + + + + + Disk storage guide - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

Disk storage guide

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/stylesheets/extra.css b/stylesheets/extra.css new file mode 100644 index 000000000..9545f7ad3 --- /dev/null +++ b/stylesheets/extra.css @@ -0,0 +1,69 @@ +/* Color Settings */ +/* https://github.com/squidfunk/mkdocs-material/blob/6b5035f5580f97532d664e3d1babf5f320e88ee9/src/assets/stylesheets/main/_colors.scss */ +/* https://squidfunk.github.io/mkdocs-material/setup/changing-the-colors/#custom-colors */ +:root > * { + --md-primary-fg-color: #E6E6E6; + --md-primary-bg-color: #1d1d20; + + /* --md-default-fg-color: #1d1d20; */ + /* --md-default-bg-color: #ffffff; */ + + /* --md-code-fg-color: #1d1d20; */ + + --md-accent-fg-color: #999; + + /* --md-admonition-fg-color: #1d1d20; */ + + /* --md-typeset-color: #1d1d20; */ + --md-typeset-a-color: #0645AD; + + --md-footer-bg-color: #E6E6E6; + --md-footer-fg-color: #000000; + +} + +/* Maximum space for text block */ +/* .md-grid { + max-width: 70%; /* or 100%, if you want to stretch to full-width */ +} */ + +/* https://github.com/squidfunk/mkdocs-material/issues/4832#issuecomment-1374891676 */ +.md-nav__link[for] { + color: var(--md-default-fg-color) !important; +} + +/* Center Markdown Tables (requires md_in_html extension) */ +.center-table { + text-align: center; +} + +/* Reset alignment for table cells */ +.md-typeset .center-table :is(td, th):not([align]) { + text-align: initial; +} + + .md-typeset a { + text-decoration: underline; +} + + +/* Markdown Header */ +/* https://github.com/squidfunk/mkdocs-material/blob/dcab57dd1cced4b77875c1aa1b53467c62709d31/src/assets/stylesheets/main/_typeset.scss */ +.md-typeset h1 { + font-weight: 400; + color: var(--md-default-fg-color); +} + +.md-typeset h2 { + font-weight: 400; +} + +.md-typeset h3 { + font-weight: 500; +} + +/* Image align center */ +.center { + display: block; + margin: 0 auto; +} diff --git a/support/index.html b/support/index.html new file mode 100644 index 000000000..e045a8b12 --- /dev/null +++ b/support/index.html @@ -0,0 +1,3126 @@ + + + + + + + + + + + + + + + + + + + + + + + Support - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + + + + + + + + + + + + + + +

UPPMAX support

+

If you lost your UPPMAX password, see how to reset your UPPMAX password.

+

If you need other help using UPPMAX, +preferably contact us through the Support Form. +If that does not work, use support@uppmax.uu.se.

+

If you want to contribute, see how to contribute.

+

If you need general Uppsala University IT support, +use the Uppsala University IT Servicedesk.

+

If you plan a course using UPPMAX resources, see +What should I think about when planning a course using UPPMAX resources

+ + + + + + + + + + + + + +
+
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/uppmax/gitlab/index.html b/uppmax/gitlab/index.html new file mode 100644 index 000000000..f73213be3 --- /dev/null +++ b/uppmax/gitlab/index.html @@ -0,0 +1,3106 @@ + + + + + + + + + + + + + + + + + + + UPPMAX GitLab - UPPMAX Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+ + + + + + + +
+ +
+ + + + +
+
+ + + +
+
+
+ + + + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+ +
+ + + + + +
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+
+ + + + + + + + + + \ No newline at end of file