diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..c743517 --- /dev/null +++ b/404.html @@ -0,0 +1,353 @@ + + + + + + + + + + + Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+
+

404

+

Page not found

+
+
+ + +
+
+ + + + + + + + + + + + diff --git a/css/base.css b/css/base.css new file mode 100644 index 0000000..2610341 --- /dev/null +++ b/css/base.css @@ -0,0 +1,325 @@ +html { + /* csslint ignore:start */ + /* The nav header is 3.5rem high, plus 20px for the margin-top of the + main container. */ + scroll-padding-top: calc(3.5rem + 20px); + /* csslint ignore:end */ +} + +/* Replacement for `body { background-attachment: fixed; }`, which has + performance issues when scrolling on large displays. See #1394. */ +body::before { + content: ' '; + position: fixed; + width: 100%; + height: 100%; + top: 0; + left: 0; + background-color: #f8f8f8; + background: url(../img/grid.png) repeat-x; + will-change: transform; + z-index: -1; +} + +body > .container { + margin-top: 20px; + min-height: 400px; +} + +.navbar.fixed-top { /* csslint allow: adjoining-classes */ + /* csslint ignore:start */ + position: -webkit-sticky; + position: sticky; + /* csslint ignore:end */ +} + +.source-links { + float: right; +} + +.col-md-9 img { + max-width: 100%; + display: inline-block; + padding: 4px; + line-height: 1.428571429; + background-color: #fff; + border: 1px solid #ddd; + border-radius: 4px; + margin: 20px auto 30px auto; +} + +h1 { + color: #444; + font-weight: 400; + font-size: 42px; +} + +h2, h3, h4, h5, h6 { + color: #444; + font-weight: 300; +} + +hr { + border-top: 1px solid #aaa; +} + +pre, .rst-content tt { + max-width: 100%; + background: #fff; + border: solid 1px #e1e4e5; + color: #333; + overflow-x: auto; +} + +code.code-large, .rst-content tt.code-large { + font-size: 90%; +} + +code { + padding: 2px 5px; + background: #fff; + border: solid 1px #e1e4e5; + color: #333; + white-space: pre-wrap; + word-wrap: break-word; +} + +pre code { + display: block; + background: transparent; + border: none; + white-space: pre; + word-wrap: normal; + font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; + font-size: 12px; +} + +kbd { + padding: 2px 
4px; + font-size: 90%; + color: #fff; + background-color: #333; + border-radius: 3px; + -webkit-box-shadow: inset 0 -1px 0 rgba(0,0,0,.25); + box-shadow: inset 0 -1px 0 rgba(0,0,0,.25); +} + +a code { + color: #2FA4E7; +} + +a:hover code, a:focus code { + color: #157AB5; +} + +footer { + margin-top: 30px; + margin-bottom: 10px; + text-align: center; + font-weight: 200; +} + +.modal-dialog { + margin-top: 60px; +} + +/* + * Side navigation + * + * Scrollspy and affixed enhanced navigation to highlight sections and secondary + * sections of docs content. + */ + +.bs-sidebar.affix { /* csslint allow: adjoining-classes */ + /* csslint ignore:start */ + position: -webkit-sticky; + position: sticky; + /* csslint ignore:end */ + /* The nav header is 3.5rem high, plus 20px for the margin-top of the + main container. */ + top: calc(3.5rem + 20px); +} + +.bs-sidebar.card { /* csslint allow: adjoining-classes */ + padding: 0; + max-height: 90%; + overflow-y: auto; +} + +/* Toggle (vertically flip) sidebar collapse icon */ +.bs-sidebar .navbar-toggler span { + -moz-transform: scale(1, -1); + -webkit-transform: scale(1, -1); + -o-transform: scale(1, -1); + -ms-transform: scale(1, -1); + transform: scale(1, -1); +} + +.bs-sidebar .navbar-toggler.collapsed span { /* csslint allow: adjoining-classes */ + -moz-transform: scale(1, 1); + -webkit-transform: scale(1, 1); + -o-transform: scale(1, 1); + -ms-transform: scale(1, 1); + transform: scale(1, 1); +} + +/* First level of nav */ +.bs-sidebar > .navbar-collapse > .nav { + padding-top: 10px; + padding-bottom: 10px; + border-radius: 5px; + width: 100%; +} + +/* All levels of nav */ +.bs-sidebar .nav > li > a { + display: block; + padding: 5px 20px; + z-index: 1; +} +.bs-sidebar .nav > li > a:hover, +.bs-sidebar .nav > li > a:focus { + text-decoration: none; + border-right: 1px solid; +} +.bs-sidebar .nav > li > a.active, +.bs-sidebar .nav > li > a.active:hover, +.bs-sidebar .nav > li > a.active:focus { + font-weight: bold; + 
background-color: transparent; + border-right: 1px solid; +} + +.bs-sidebar .nav .nav .nav { + margin-left: 1em; +} + +.bs-sidebar .nav > li > a { + font-weight: bold; +} + +.bs-sidebar .nav .nav > li > a { + font-weight: normal; +} + +.headerlink { + font-family: FontAwesome; + font-size: 14px; + display: none; + padding-left: .5em; +} + +h1:hover .headerlink, h2:hover .headerlink, h3:hover .headerlink, h4:hover .headerlink, h5:hover .headerlink, h6:hover .headerlink { + display:inline-block; +} + +blockquote { + padding-left: 10px; + border-left: 4px solid #e6e6e6; +} + +.admonition, details { + padding: 15px; + margin-bottom: 20px; + border: 1px solid transparent; + border-radius: 4px; + text-align: left; +} + +.admonition.note, details.note { /* csslint allow: adjoining-classes */ + color: #2e6b89; + background-color: #e2f0f7; + border-color: #bce8f1; +} + +.admonition.warning, details.warning { /* csslint allow: adjoining-classes */ + color: #7a6032; + background-color: #fffae5; + border-color: #fbeed5; +} + +.admonition.danger, details.danger { /* csslint allow: adjoining-classes */ + color: #7f3130; + background-color: #fde3e3; + border-color: #eed3d7; +} + +.admonition-title, summary { + font-weight: bold; + text-align: left; +} + +.admonition>p:last-child, details>p:last-child { + margin-bottom: 0; +} + +@media (max-width: 991.98px) { + .navbar-collapse.show { /* csslint allow: adjoining-classes */ + overflow-y: auto; + max-height: calc(100vh - 3.5rem); + } +} + +.dropdown-item.open { /* csslint allow: adjoining-classes */ + color: #fff; + background-color: #2FA4E7; +} + +.dropdown-submenu > .dropdown-menu { + margin: 0 0 0 1.5rem; + padding: 0; + border-width: 0; +} + +.dropdown-submenu > a::after { + display: block; + content: " "; + float: right; + width: 0; + height: 0; + border-color: transparent; + border-style: solid; + border-width: 5px 0 5px 5px; + border-left-color: #ccc; + margin-top: 5px; + margin-right: -10px; +} + +.dropdown-submenu:hover > 
a::after { + border-left-color: #fff; +} + +@media (min-width: 992px) { + .dropdown-menu { + overflow-y: auto; + max-height: calc(100vh - 3.5rem); + } + + .dropdown-submenu { + position: relative; + } + + .dropdown-submenu > .dropdown-menu { + /* csslint ignore:start */ + position: fixed !important; + /* csslint ignore:end */ + margin-top: -9px; + margin-left: -2px; + border-width: 1px; + padding: 0.5rem 0; + } + + .dropdown-submenu.pull-left { /* csslint allow: adjoining-classes */ + float: none; + } + + .dropdown-submenu.pull-left > .dropdown-menu { /* csslint allow: adjoining-classes */ + left: -100%; + margin-left: 10px; + } +} + +@media print { + /* Remove sidebar when print */ + .col-md-3 { display: none; } +} diff --git a/css/bootstrap.min.css b/css/bootstrap.min.css new file mode 100644 index 0000000..4ce503d --- /dev/null +++ b/css/bootstrap.min.css @@ -0,0 +1,12 @@ +/*! + * Bootswatch v4.1.3 + * Homepage: https://bootswatch.com + * Copyright 2012-2018 Thomas Park + * Licensed under MIT + * Based on Bootstrap +*//*! + * Bootstrap v4.1.3 (https://getbootstrap.com/) + * Copyright 2011-2018 The Bootstrap Authors + * Copyright 2011-2018 Twitter, Inc. 
+ * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */:root{--blue:#033C73;--indigo:#6610f2;--purple:#6f42c1;--pink:#e83e8c;--red:#C71C22;--orange:#fd7e14;--yellow:#DD5600;--green:#73A839;--teal:#20c997;--cyan:#2FA4E7;--white:#fff;--gray:#868e96;--gray-dark:#343a40;--primary:#2FA4E7;--secondary:#e9ecef;--success:#73A839;--info:#033C73;--warning:#DD5600;--danger:#C71C22;--light:#f8f9fa;--dark:#343a40;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";--font-family-monospace:SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}*,*::before,*::after{-webkit-box-sizing:border-box;box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:transparent}@-ms-viewport{width:device-width}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0 !important}hr{-webkit-box-sizing:content-box;box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:0.5rem}p{margin-top:0;margin-bottom:1rem}abbr[title],abbr[data-original-title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol 
ul,ul ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}dfn{font-style:italic}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#2FA4E7;text-decoration:none;background-color:transparent;-webkit-text-decoration-skip:objects}a:hover{color:#157ab5;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):hover,a:not([href]):not([tabindex]):focus{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}pre,code,kbd,samp{font-family:SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:0.75rem;padding-bottom:0.75rem;color:#868e96;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:0.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}button,html 
[type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{padding:0;border-style:none}input[type="radio"],input[type="checkbox"]{-webkit-box-sizing:border-box;box-sizing:border-box;padding:0}input[type="date"],input[type="time"],input[type="datetime-local"],input[type="month"]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{outline-offset:-2px;-webkit-appearance:none}[type="search"]::-webkit-search-cancel-button,[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none !important}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{margin-bottom:0.5rem;font-family:inherit;font-weight:500;line-height:1.2;color:#2FA4E7}h1,.h1{font-size:2.5rem}h2,.h2{font-size:2rem}h3,.h3{font-size:1.75rem}h4,.h4{font-size:1.5rem}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{margin-top:1rem;margin-bottom:1rem;border:0;border-top:1px solid 
rgba(0,0,0,0.1)}small,.small{font-size:80%;font-weight:400}mark,.mark{padding:0.2em;background-color:#fcf8e3}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:0.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#868e96}.blockquote-footer::before{content:"\2014 \00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:0.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:0.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:0.5rem;line-height:1}.figure-caption{font-size:90%;color:#868e96}code{font-size:87.5%;color:#e83e8c;word-break:break-word}a>code{color:inherit}kbd{padding:0.2rem 0.4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:0.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width: 576px){.container{max-width:540px}}@media (min-width: 768px){.container{max-width:720px}}@media (min-width: 992px){.container{max-width:960px}}@media (min-width: 
1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*="col-"]{padding-right:0;padding-left:0}.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col,.col-auto,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm,.col-sm-auto,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md,.col-md-auto,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg,.col-lg-auto,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl,.col-xl-auto{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}.col{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 
58.3333333333%;max-width:58.3333333333%}.col-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media (min-width: 
576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-sm-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-sm-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-sm-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-sm-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-sm-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-sm-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-sm-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-sm-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-sm-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-sm-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-sm-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-sm-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-sm-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-sm-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-sm-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-sm-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media (min-width: 768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-md-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 
25%;max-width:25%}.col-md-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-md-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-md-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-md-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-md-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-md-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-md-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-md-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-md-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-md-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-md-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-md-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-md-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-md-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-md-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-md-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{ma
rgin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media (min-width: 992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-lg-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-lg-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-lg-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-lg-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-lg-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-lg-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-lg-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-lg-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-lg-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-lg-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-lg-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-lg-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-lg-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-lg-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-lg-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-lg-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-lg-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media (min-width: 1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-xl-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 
25%;max-width:25%}.col-xl-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-xl-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-xl-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-xl-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-xl-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-xl-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-xl-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-xl-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-xl-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-xl-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-xl-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-xl-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-xl-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-xl-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-xl-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-xl-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-xl-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{ma
rgin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;margin-bottom:1rem;background-color:transparent}.table th,.table td{padding:0.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table .table{background-color:#fff}.table-sm th,.table-sm td{padding:0.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered th,.table-bordered td{border:1px solid #dee2e6}.table-bordered thead th,.table-bordered thead td{border-bottom-width:2px}.table-borderless th,.table-borderless td,.table-borderless thead th,.table-borderless tbody+tbody{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,0.05)}.table-hover tbody tr:hover{background-color:rgba(0,0,0,0.075)}.table-primary,.table-primary>th,.table-primary>td{background-color:#c5e6f8}.table-hover .table-primary:hover{background-color:#aedcf5}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#aedcf5}.table-secondary,.table-secondary>th,.table-secondary>td{background-color:#f9fafb}.table-hover .table-secondary:hover{background-color:#eaedf1}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#eaedf1}.table-success,.table-success>th,.table-success>td{background-color:#d8e7c8}.table-hover .table-success:hover{background-color:#cbdfb6}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#cbdfb6}.table-info,.table-info>th,.table-info>td{background-color:#b8c8d8}.table-hover .table-info:hover{background-color:#a8bbcf}.table-hover .table-info:hover>td,.table-hover 
.table-info:hover>th{background-color:#a8bbcf}.table-warning,.table-warning>th,.table-warning>td{background-color:#f5d0b8}.table-hover .table-warning:hover{background-color:#f2c1a2}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#f2c1a2}.table-danger,.table-danger>th,.table-danger>td{background-color:#efbfc1}.table-hover .table-danger:hover{background-color:#eaabad}.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#eaabad}.table-light,.table-light>th,.table-light>td{background-color:#fdfdfe}.table-hover .table-light:hover{background-color:#ececf6}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#ececf6}.table-dark,.table-dark>th,.table-dark>td{background-color:#c6c8ca}.table-hover .table-dark:hover{background-color:#b9bbbe}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#b9bbbe}.table-active,.table-active>th,.table-active>td{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,0.075)}.table .thead-dark th{color:#fff;background-color:#212529;border-color:#32383e}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#212529}.table-dark th,.table-dark td,.table-dark thead th{border-color:#32383e}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:rgba(255,255,255,0.05)}.table-dark.table-hover tbody tr:hover{background-color:rgba(255,255,255,0.075)}@media (max-width: 575.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-sm>.table-bordered{border:0}}@media (max-width: 
767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-md>.table-bordered{border:0}}@media (max-width: 991.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-lg>.table-bordered{border:0}}@media (max-width: 1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(2.25rem + 2px);padding:0.375rem 0.75rem;font-size:1rem;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:0.25rem;-webkit-transition:border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: reduce){.form-control{-webkit-transition:none;transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#a1d6f4;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 0.2rem 
rgba(47,164,231,0.25)}.form-control::-webkit-input-placeholder{color:#868e96;opacity:1}.form-control:-ms-input-placeholder{color:#868e96;opacity:1}.form-control::-ms-input-placeholder{color:#868e96;opacity:1}.form-control::placeholder{color:#868e96;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(0.375rem + 1px);padding-bottom:calc(0.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(0.5rem + 1px);padding-bottom:calc(0.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(0.25rem + 1px);padding-bottom:calc(0.25rem + 1px);font-size:0.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding-top:0.375rem;padding-bottom:0.375rem;margin-bottom:0;line-height:1.5;color:#495057;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.8125rem + 2px);padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.form-control-lg{height:calc(2.875rem + 2px);padding:0.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}select.form-control[size],select.form-control[multiple]{height:auto}textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:0.25rem}.form-row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*="col-"]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:0.3rem;margin-left:-1.25rem}.form-check-input:disabled ~ 
.form-check-label{color:#868e96}.form-check-label{margin-bottom:0}.form-check-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding-left:0;margin-right:0.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:0.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:0.25rem;font-size:80%;color:#73A839}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:0.25rem 0.5rem;margin-top:.1rem;font-size:0.875rem;line-height:1.5;color:#fff;background-color:rgba(115,168,57,0.9);border-radius:0.25rem}.was-validated .form-control:valid,.form-control.is-valid,.was-validated .custom-select:valid,.custom-select.is-valid{border-color:#73A839}.was-validated .form-control:valid:focus,.form-control.is-valid:focus,.was-validated .custom-select:valid:focus,.custom-select.is-valid:focus{border-color:#73A839;-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.25);box-shadow:0 0 0 0.2rem rgba(115,168,57,0.25)}.was-validated .form-control:valid ~ .valid-feedback,.was-validated .form-control:valid ~ .valid-tooltip,.form-control.is-valid ~ .valid-feedback,.form-control.is-valid ~ .valid-tooltip,.was-validated .custom-select:valid ~ .valid-feedback,.was-validated .custom-select:valid ~ .valid-tooltip,.custom-select.is-valid ~ .valid-feedback,.custom-select.is-valid ~ .valid-tooltip{display:block}.was-validated .form-control-file:valid ~ .valid-feedback,.was-validated .form-control-file:valid ~ .valid-tooltip,.form-control-file.is-valid ~ .valid-feedback,.form-control-file.is-valid ~ .valid-tooltip{display:block}.was-validated .form-check-input:valid ~ .form-check-label,.form-check-input.is-valid ~ .form-check-label{color:#73A839}.was-validated .form-check-input:valid ~ .valid-feedback,.was-validated .form-check-input:valid ~ .valid-tooltip,.form-check-input.is-valid ~ .valid-feedback,.form-check-input.is-valid ~ 
.valid-tooltip{display:block}.was-validated .custom-control-input:valid ~ .custom-control-label,.custom-control-input.is-valid ~ .custom-control-label{color:#73A839}.was-validated .custom-control-input:valid ~ .custom-control-label::before,.custom-control-input.is-valid ~ .custom-control-label::before{background-color:#b2d789}.was-validated .custom-control-input:valid ~ .valid-feedback,.was-validated .custom-control-input:valid ~ .valid-tooltip,.custom-control-input.is-valid ~ .valid-feedback,.custom-control-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before,.custom-control-input.is-valid:checked ~ .custom-control-label::before{background-color:#8dc450}.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before,.custom-control-input.is-valid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(115,168,57,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(115,168,57,0.25)}.was-validated .custom-file-input:valid ~ .custom-file-label,.custom-file-input.is-valid ~ .custom-file-label{border-color:#73A839}.was-validated .custom-file-input:valid ~ .custom-file-label::after,.custom-file-input.is-valid ~ .custom-file-label::after{border-color:inherit}.was-validated .custom-file-input:valid ~ .valid-feedback,.was-validated .custom-file-input:valid ~ .valid-tooltip,.custom-file-input.is-valid ~ .valid-feedback,.custom-file-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-file-input:valid:focus ~ .custom-file-label,.custom-file-input.is-valid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.25);box-shadow:0 0 0 0.2rem rgba(115,168,57,0.25)}.invalid-feedback{display:none;width:100%;margin-top:0.25rem;font-size:80%;color:#C71C22}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:0.25rem 
0.5rem;margin-top:.1rem;font-size:0.875rem;line-height:1.5;color:#fff;background-color:rgba(199,28,34,0.9);border-radius:0.25rem}.was-validated .form-control:invalid,.form-control.is-invalid,.was-validated .custom-select:invalid,.custom-select.is-invalid{border-color:#C71C22}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus,.was-validated .custom-select:invalid:focus,.custom-select.is-invalid:focus{border-color:#C71C22;-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.25);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.25)}.was-validated .form-control:invalid ~ .invalid-feedback,.was-validated .form-control:invalid ~ .invalid-tooltip,.form-control.is-invalid ~ .invalid-feedback,.form-control.is-invalid ~ .invalid-tooltip,.was-validated .custom-select:invalid ~ .invalid-feedback,.was-validated .custom-select:invalid ~ .invalid-tooltip,.custom-select.is-invalid ~ .invalid-feedback,.custom-select.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-control-file:invalid ~ .invalid-feedback,.was-validated .form-control-file:invalid ~ .invalid-tooltip,.form-control-file.is-invalid ~ .invalid-feedback,.form-control-file.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-check-input:invalid ~ .form-check-label,.form-check-input.is-invalid ~ .form-check-label{color:#C71C22}.was-validated .form-check-input:invalid ~ .invalid-feedback,.was-validated .form-check-input:invalid ~ .invalid-tooltip,.form-check-input.is-invalid ~ .invalid-feedback,.form-check-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid ~ .custom-control-label,.custom-control-input.is-invalid ~ .custom-control-label{color:#C71C22}.was-validated .custom-control-input:invalid ~ .custom-control-label::before,.custom-control-input.is-invalid ~ .custom-control-label::before{background-color:#ec777b}.was-validated .custom-control-input:invalid ~ .invalid-feedback,.was-validated .custom-control-input:invalid ~ 
.invalid-tooltip,.custom-control-input.is-invalid ~ .invalid-feedback,.custom-control-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before,.custom-control-input.is-invalid:checked ~ .custom-control-label::before{background-color:#e2343a}.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before,.custom-control-input.is-invalid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(199,28,34,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(199,28,34,0.25)}.was-validated .custom-file-input:invalid ~ .custom-file-label,.custom-file-input.is-invalid ~ .custom-file-label{border-color:#C71C22}.was-validated .custom-file-input:invalid ~ .custom-file-label::after,.custom-file-input.is-invalid ~ .custom-file-label::after{border-color:inherit}.was-validated .custom-file-input:invalid ~ .invalid-feedback,.was-validated .custom-file-input:invalid ~ .invalid-tooltip,.custom-file-input.is-invalid ~ .invalid-feedback,.custom-file-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-file-input:invalid:focus ~ .custom-file-label,.custom-file-input.is-invalid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.25);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.25)}.form-inline{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.form-inline .form-check{width:100%}@media (min-width: 576px){.form-inline label{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 
auto;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .input-group,.form-inline .custom-select{width:auto}.form-inline .form-check{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;margin-top:0;margin-right:0.25rem;margin-left:0}.form-inline .custom-control{-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;padding:0.375rem 0.75rem;font-size:1rem;line-height:1.5;border-radius:0.25rem;-webkit-transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: 
reduce){.btn{-webkit-transition:none;transition:none}}.btn:hover,.btn:focus{text-decoration:none}.btn:focus,.btn.focus{outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25)}.btn.disabled,.btn:disabled{opacity:0.65}.btn:not(:disabled):not(.disabled){cursor:pointer}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.btn-primary:hover{color:#fff;background-color:#1992d7;border-color:#178acc}.btn-primary:focus,.btn-primary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.btn-primary:not(:disabled):not(.disabled):active,.btn-primary:not(:disabled):not(.disabled).active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#178acc;border-color:#1682c0}.btn-primary:not(:disabled):not(.disabled):active:focus,.btn-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5)}.btn-secondary{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-secondary:hover{color:#212529;background-color:#d3d9df;border-color:#cbd3da}.btn-secondary:focus,.btn-secondary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-secondary:not(:disabled):not(.disabled):active,.btn-secondary:not(:disabled):not(.disabled).active,.show>.btn-secondary.dropdown-toggle{color:#212529;background-color:#cbd3da;border-color:#c4ccd4}.btn-secondary:not(:disabled):not(.disabled):active:focus,.btn-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 
0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-success{color:#fff;background-color:#73A839;border-color:#73A839}.btn-success:hover{color:#fff;background-color:#5f8b2f;border-color:#59822c}.btn-success:focus,.btn-success.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5);box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#73A839;border-color:#73A839}.btn-success:not(:disabled):not(.disabled):active,.btn-success:not(:disabled):not(.disabled).active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#59822c;border-color:#527829}.btn-success:not(:disabled):not(.disabled):active:focus,.btn-success:not(:disabled):not(.disabled).active:focus,.show>.btn-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5);box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5)}.btn-info{color:#fff;background-color:#033C73;border-color:#033C73}.btn-info:hover{color:#fff;background-color:#02294e;border-color:#022241}.btn-info:focus,.btn-info.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5);box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#033C73;border-color:#033C73}.btn-info:not(:disabled):not(.disabled):active,.btn-info:not(:disabled):not(.disabled).active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#022241;border-color:#011c35}.btn-info:not(:disabled):not(.disabled):active:focus,.btn-info:not(:disabled):not(.disabled).active:focus,.show>.btn-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5);box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5)}.btn-warning{color:#fff;background-color:#DD5600;border-color:#DD5600}.btn-warning:hover{color:#fff;background-color:#b74700;border-color:#aa4200}.btn-warning:focus,.btn-warning.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5);box-shadow:0 0 0 0.2rem 
rgba(221,86,0,0.5)}.btn-warning.disabled,.btn-warning:disabled{color:#fff;background-color:#DD5600;border-color:#DD5600}.btn-warning:not(:disabled):not(.disabled):active,.btn-warning:not(:disabled):not(.disabled).active,.show>.btn-warning.dropdown-toggle{color:#fff;background-color:#aa4200;border-color:#9d3d00}.btn-warning:not(:disabled):not(.disabled):active:focus,.btn-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5);box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5)}.btn-danger{color:#fff;background-color:#C71C22;border-color:#C71C22}.btn-danger:hover{color:#fff;background-color:#a5171c;border-color:#9a161a}.btn-danger:focus,.btn-danger.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#C71C22;border-color:#C71C22}.btn-danger:not(:disabled):not(.disabled):active,.btn-danger:not(:disabled):not(.disabled).active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#9a161a;border-color:#8f1418}.btn-danger:not(:disabled):not(.disabled):active:focus,.btn-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5)}.btn-light{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:hover{color:#212529;background-color:#e2e6ea;border-color:#dae0e5}.btn-light:focus,.btn-light.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5);box-shadow:0 0 0 0.2rem 
rgba(248,249,250,0.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-light:not(:disabled):not(.disabled):active,.btn-light:not(:disabled):not(.disabled).active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#dae0e5;border-color:#d3d9df}.btn-light:not(:disabled):not(.disabled):active:focus,.btn-light:not(:disabled):not(.disabled).active:focus,.show>.btn-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5);box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5)}.btn-dark{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:hover{color:#fff;background-color:#23272b;border-color:#1d2124}.btn-dark:focus,.btn-dark.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5);box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#343a40;border-color:#343a40}.btn-dark:not(:disabled):not(.disabled):active,.btn-dark:not(:disabled):not(.disabled).active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#1d2124;border-color:#171a1d}.btn-dark:not(:disabled):not(.disabled):active:focus,.btn-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5);box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5)}.btn-outline-primary{color:#2FA4E7;background-color:transparent;background-image:none;border-color:#2FA4E7}.btn-outline-primary:hover{color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.btn-outline-primary:focus,.btn-outline-primary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5);box-shadow:0 0 0 0.2rem 
rgba(47,164,231,0.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#2FA4E7;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled):active,.btn-outline-primary:not(:disabled):not(.disabled).active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.5)}.btn-outline-secondary{color:#e9ecef;background-color:transparent;background-image:none;border-color:#e9ecef}.btn-outline-secondary:hover{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-outline-secondary:focus,.btn-outline-secondary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#e9ecef;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled):active,.btn-outline-secondary:not(:disabled):not(.disabled).active,.show>.btn-outline-secondary.dropdown-toggle{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-outline-success{color:#73A839;background-color:transparent;background-image:none;border-color:#73A839}.btn-outline-success:hover{color:#fff;background-color:#73A839;border-color:#73A839}.btn-outline-success:focus,.btn-outline-success.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5);box-shadow:0 0 0 0.2rem 
rgba(115,168,57,0.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#73A839;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled):active,.btn-outline-success:not(:disabled):not(.disabled).active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#73A839;border-color:#73A839}.btn-outline-success:not(:disabled):not(.disabled):active:focus,.btn-outline-success:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5);box-shadow:0 0 0 0.2rem rgba(115,168,57,0.5)}.btn-outline-info{color:#033C73;background-color:transparent;background-image:none;border-color:#033C73}.btn-outline-info:hover{color:#fff;background-color:#033C73;border-color:#033C73}.btn-outline-info:focus,.btn-outline-info.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5);box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#033C73;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled):active,.btn-outline-info:not(:disabled):not(.disabled).active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#033C73;border-color:#033C73}.btn-outline-info:not(:disabled):not(.disabled):active:focus,.btn-outline-info:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5);box-shadow:0 0 0 0.2rem rgba(3,60,115,0.5)}.btn-outline-warning{color:#DD5600;background-color:transparent;background-image:none;border-color:#DD5600}.btn-outline-warning:hover{color:#fff;background-color:#DD5600;border-color:#DD5600}.btn-outline-warning:focus,.btn-outline-warning.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5);box-shadow:0 0 0 0.2rem 
rgba(221,86,0,0.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#DD5600;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled):active,.btn-outline-warning:not(:disabled):not(.disabled).active,.show>.btn-outline-warning.dropdown-toggle{color:#fff;background-color:#DD5600;border-color:#DD5600}.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5);box-shadow:0 0 0 0.2rem rgba(221,86,0,0.5)}.btn-outline-danger{color:#C71C22;background-color:transparent;background-image:none;border-color:#C71C22}.btn-outline-danger:hover{color:#fff;background-color:#C71C22;border-color:#C71C22}.btn-outline-danger:focus,.btn-outline-danger.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#C71C22;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled):active,.btn-outline-danger:not(:disabled):not(.disabled).active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#C71C22;border-color:#C71C22}.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5);box-shadow:0 0 0 0.2rem rgba(199,28,34,0.5)}.btn-outline-light{color:#f8f9fa;background-color:transparent;background-image:none;border-color:#f8f9fa}.btn-outline-light:hover{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:focus,.btn-outline-light.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5);box-shadow:0 0 0 0.2rem 
rgba(248,249,250,0.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#f8f9fa;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled):active,.btn-outline-light:not(:disabled):not(.disabled).active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#f8f9fa;border-color:#f8f9fa}.btn-outline-light:not(:disabled):not(.disabled):active:focus,.btn-outline-light:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5);box-shadow:0 0 0 0.2rem rgba(248,249,250,0.5)}.btn-outline-dark{color:#343a40;background-color:transparent;background-image:none;border-color:#343a40}.btn-outline-dark:hover{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:focus,.btn-outline-dark.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5);box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#343a40;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled):active,.btn-outline-dark:not(:disabled):not(.disabled).active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#343a40;border-color:#343a40}.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5);box-shadow:0 0 0 0.2rem rgba(52,58,64,0.5)}.btn-link{font-weight:400;color:#2FA4E7;background-color:transparent}.btn-link:hover{color:#157ab5;text-decoration:underline;background-color:transparent;border-color:transparent}.btn-link:focus,.btn-link.focus{text-decoration:underline;border-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link:disabled,.btn-link.disabled{color:#868e96;pointer-events:none}.btn-lg,.btn-group-lg>.btn{padding:0.5rem 
1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}.btn-sm,.btn-group-sm>.btn{padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:0.5rem}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{-webkit-transition:opacity 0.15s linear;transition:opacity 0.15s linear}@media screen and (prefers-reduced-motion: reduce){.fade{-webkit-transition:none;transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height 0.35s ease;transition:height 0.35s ease}@media screen and (prefers-reduced-motion: reduce){.collapsing{-webkit-transition:none;transition:none}}.dropup,.dropright,.dropdown,.dropleft{position:relative}.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid;border-right:0.3em solid transparent;border-bottom:0;border-left:0.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:0.5rem 0;margin:0.125rem 0 0;font-size:1rem;color:#495057;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:0.25rem}.dropdown-menu-right{right:0;left:auto}.dropup .dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:0.125rem}.dropup .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0;border-right:0.3em solid transparent;border-bottom:0.3em solid;border-left:0.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:0.125rem}.dropright 
.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid transparent;border-right:0;border-bottom:0.3em solid transparent;border-left:0.3em solid}.dropright .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-toggle::after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:0.125rem}.dropleft .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:""}.dropleft .dropdown-toggle::after{display:none}.dropleft .dropdown-toggle::before{display:inline-block;width:0;height:0;margin-right:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid transparent;border-right:0.3em solid;border-bottom:0.3em solid transparent}.dropleft .dropdown-toggle:empty::after{margin-left:0}.dropleft .dropdown-toggle::before{vertical-align:0}.dropdown-menu[x-placement^="top"],.dropdown-menu[x-placement^="right"],.dropdown-menu[x-placement^="bottom"],.dropdown-menu[x-placement^="left"]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:0.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:0.25rem 1.5rem;clear:both;font-weight:400;color:#495057;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:hover,.dropdown-item:focus{color:#fff;text-decoration:none;background-color:#2FA4E7}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#2FA4E7}.dropdown-item.disabled,.dropdown-item:disabled{color:#868e96;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:0.5rem 1.5rem;margin-bottom:0;font-size:0.875rem;color:#868e96;white-space:nowrap}.dropdown-item-text{display:block;padding:0.25rem 
1.5rem;color:#495057}.btn-group,.btn-group-vertical{position:relative;display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover{z-index:1}.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group,.btn-group-vertical .btn+.btn,.btn-group-vertical .btn+.btn-group,.btn-group-vertical .btn-group+.btn,.btn-group-vertical .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:0.5625rem;padding-left:0.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropright .dropdown-toggle-split::after{margin-left:0}.dropleft 
.dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:0.375rem;padding-left:0.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:0.75rem;padding-left:0.75rem}.btn-group-vertical{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.btn-group-vertical .btn,.btn-group-vertical .btn-group{width:100%}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type="radio"],.btn-group-toggle>.btn input[type="checkbox"],.btn-group-toggle>.btn-group>.btn input[type="radio"],.btn-group-toggle>.btn-group>.btn input[type="checkbox"]{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.input-group{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.custom-select,.input-group>.custom-file{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 
auto;width:1%;margin-bottom:0}.input-group>.form-control+.form-control,.input-group>.form-control+.custom-select,.input-group>.form-control+.custom-file,.input-group>.custom-select+.form-control,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.custom-file,.input-group>.custom-file+.form-control,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.custom-file{margin-left:-1px}.input-group>.form-control:focus,.input-group>.custom-select:focus,.input-group>.custom-file .custom-file-input:focus ~ .custom-file-label{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.form-control:not(:last-child),.input-group>.custom-select:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.form-control:not(:first-child),.input-group>.custom-select:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label::after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-prepend,.input-group-append{display:-webkit-box;display:-ms-flexbox;display:flex}.input-group-prepend .btn,.input-group-append .btn{position:relative;z-index:2}.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.input-group-text,.input-group-append 
.input-group-text+.btn{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding:0.375rem 0.75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:0.25rem}.input-group-text input[type="radio"],.input-group-text input[type="checkbox"]{margin-top:0}.input-group-lg>.form-control,.input-group-lg>.input-group-prepend>.input-group-text,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-append>.btn{height:calc(2.875rem + 2px);padding:0.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}.input-group-sm>.form-control,.input-group-sm>.input-group-prepend>.input-group-text,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-append>.btn{height:calc(1.8125rem + 2px);padding:0.25rem 
0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text,.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked ~ .custom-control-label::before{color:#fff;background-color:#2FA4E7}.custom-control-input:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25)}.custom-control-input:active ~ .custom-control-label::before{color:#fff;background-color:#cfeaf9}.custom-control-input:disabled ~ .custom-control-label{color:#868e96}.custom-control-input:disabled ~ 
.custom-control-label::before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0}.custom-control-label::before{position:absolute;top:0.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;pointer-events:none;content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#dee2e6}.custom-control-label::after{position:absolute;top:0.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:"";background-repeat:no-repeat;background-position:center center;background-size:50% 50%}.custom-checkbox .custom-control-label::before{border-radius:0.25rem}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before{background-color:#2FA4E7}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before{background-color:#2FA4E7}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(47,164,231,0.5)}.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before{background-color:rgba(47,164,231,0.5)}.custom-radio .custom-control-label::before{border-radius:50%}.custom-radio .custom-control-input:checked ~ .custom-control-label::before{background-color:#2FA4E7}.custom-radio .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' 
viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(47,164,231,0.5)}.custom-select{display:inline-block;width:100%;height:calc(2.25rem + 2px);padding:0.375rem 1.75rem 0.375rem 0.75rem;line-height:1.5;color:#495057;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center;background-size:8px 10px;border:1px solid #ced4da;border-radius:0.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#a1d6f4;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(161,214,244,0.5);box-shadow:0 0 0 0.2rem rgba(161,214,244,0.5)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:0.75rem;background-image:none}.custom-select:disabled{color:#868e96;background-color:#e9ecef}.custom-select::-ms-expand{opacity:0}.custom-select-sm{height:calc(1.8125rem + 2px);padding-top:0.375rem;padding-bottom:0.375rem;font-size:75%}.custom-select-lg{height:calc(2.875rem + 2px);padding-top:0.375rem;padding-bottom:0.375rem;font-size:125%}.custom-file{position:relative;display:inline-block;width:100%;height:calc(2.25rem + 2px);margin-bottom:0}.custom-file-input{position:relative;z-index:2;width:100%;height:calc(2.25rem + 2px);margin:0;opacity:0}.custom-file-input:focus ~ .custom-file-label{border-color:#a1d6f4;-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25)}.custom-file-input:focus ~ .custom-file-label::after{border-color:#a1d6f4}.custom-file-input:disabled ~ .custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en) ~ 
.custom-file-label::after{content:"Browse"}.custom-file-label{position:absolute;top:0;right:0;left:0;z-index:1;height:calc(2.25rem + 2px);padding:0.375rem 0.75rem;line-height:1.5;color:#495057;background-color:#fff;border:1px solid #ced4da;border-radius:0.25rem}.custom-file-label::after{position:absolute;top:0;right:0;bottom:0;z-index:3;display:block;height:2.25rem;padding:0.375rem 0.75rem;line-height:1.5;color:#495057;content:"Browse";background-color:#e9ecef;border-left:1px solid #ced4da;border-radius:0 0.25rem 0.25rem 0}.custom-range{width:100%;padding-left:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(47,164,231,0.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-0.25rem;background-color:#2FA4E7;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;-webkit-appearance:none;appearance:none}@media screen and (prefers-reduced-motion: 
reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#cfeaf9}.custom-range::-webkit-slider-runnable-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#2FA4E7;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;-moz-appearance:none;appearance:none}@media screen and (prefers-reduced-motion: reduce){.custom-range::-moz-range-thumb{-webkit-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#cfeaf9}.custom-range::-moz-range-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:0.2rem;margin-left:0.2rem;background-color:#2FA4E7;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;appearance:none}@media screen and 
(prefers-reduced-motion: reduce){.custom-range::-ms-thumb{-webkit-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#cfeaf9}.custom-range::-ms-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:0.5rem}.custom-range::-ms-fill-lower{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px;background-color:#dee2e6;border-radius:1rem}.custom-control-label::before,.custom-file-label,.custom-select{-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: reduce){.custom-control-label::before,.custom-file-label,.custom-select{-webkit-transition:none;transition:none}}.nav{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:0.5rem 1rem}.nav-link:hover,.nav-link:focus{text-decoration:none}.nav-link.disabled{color:#868e96}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#868e96;background-color:transparent;border-color:transparent}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs 
.dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:0.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#2FA4E7}.nav-fill .nav-item{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;text-align:center}.nav-justified .nav-item{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:0.5rem 1rem}.navbar>.container,.navbar>.container-fluid{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:0.3125rem;padding-bottom:0.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-nav{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:0.5rem;padding-bottom:0.5rem}.navbar-collapse{-ms-flex-preferred-size:100%;flex-basis:100%;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.navbar-toggler{padding:0.25rem 0.75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid 
transparent;border-radius:0.25rem}.navbar-toggler:hover,.navbar-toggler:focus{text-decoration:none}.navbar-toggler:not(:disabled):not(.disabled){cursor:pointer}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;background-size:100% 100%}@media (max-width: 575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 576px){.navbar-expand-sm{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-sm .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width: 767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 768px){.navbar-expand-md{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-md .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-md 
.navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width: 991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 992px){.navbar-expand-lg{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-lg .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width: 1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 1200px){.navbar-expand-xl{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-xl .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex 
!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,0.9)}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:rgba(0,0,0,0.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,0.5)}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:rgba(0,0,0,0.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,0.3)}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .nav-link.active{color:rgba(0,0,0,0.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,0.5);border-color:rgba(0,0,0,0.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,0.5)}.navbar-light .navbar-text 
a{color:rgba(0,0,0,0.9)}.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:rgba(0,0,0,0.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,0.8)}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:#fff}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,0.25)}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,0.8);border-color:rgba(255,255,255,0.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.8)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:rgba(255,255,255,0.8)}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text a:focus{color:#fff}.card{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,0.125);border-radius:0.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.card-body{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 
auto;padding:1.25rem}.card-title{margin-bottom:0.75rem}.card-subtitle{margin-top:-0.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:0.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,0.03);border-bottom:1px solid rgba(0,0,0,0.125)}.card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:0.75rem 1.25rem;background-color:rgba(0,0,0,0.03);border-top:1px solid rgba(0,0,0,0.125)}.card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.card-header-tabs{margin-right:-0.625rem;margin-bottom:-0.75rem;margin-left:-0.625rem;border-bottom:0}.card-header-pills{margin-right:-0.625rem;margin-left:-0.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img{width:100%;border-radius:calc(0.25rem - 1px)}.card-img-top{width:100%;border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.card-img-bottom{width:100%;border-bottom-right-radius:calc(0.25rem - 1px);border-bottom-left-radius:calc(0.25rem - 1px)}.card-deck{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-deck .card{margin-bottom:15px}@media (min-width: 576px){.card-deck{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 
0%;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-group>.card{margin-bottom:15px}@media (min-width: 576px){.card-group{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap}.card-group>.card{-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:first-child .card-img-top,.card-group>.card:first-child .card-header{border-top-right-radius:0}.card-group>.card:first-child .card-img-bottom,.card-group>.card:first-child .card-footer{border-bottom-right-radius:0}.card-group>.card:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:last-child .card-img-top,.card-group>.card:last-child .card-header{border-top-left-radius:0}.card-group>.card:last-child .card-img-bottom,.card-group>.card:last-child .card-footer{border-bottom-left-radius:0}.card-group>.card:only-child{border-radius:0.25rem}.card-group>.card:only-child .card-img-top,.card-group>.card:only-child .card-header{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.card-group>.card:only-child .card-img-bottom,.card-group>.card:only-child .card-footer{border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.card-group>.card:not(:first-child):not(:last-child):not(:only-child){border-radius:0}.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-top,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) 
.card-header,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-footer{border-radius:0}}.card-columns .card{margin-bottom:0.75rem}@media (min-width: 576px){.card-columns{-webkit-column-count:3;column-count:3;-webkit-column-gap:1.25rem;column-gap:1.25rem;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion .card:not(:first-of-type):not(:last-of-type){border-bottom:0;border-radius:0}.accordion .card:not(:first-of-type) .card-header:first-child{border-radius:0}.accordion .card:first-of-type{border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion .card:last-of-type{border-top-left-radius:0;border-top-right-radius:0}.breadcrumb{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding:0.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:0.25rem}.breadcrumb-item+.breadcrumb-item{padding-left:0.5rem}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:0.5rem;color:#868e96;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#868e96}.pagination{display:-webkit-box;display:-ms-flexbox;display:flex;padding-left:0;list-style:none;border-radius:0.25rem}.page-link{position:relative;display:block;padding:0.5rem 0.75rem;margin-left:-1px;line-height:1.25;color:#2FA4E7;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#157ab5;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:2;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25);box-shadow:0 0 0 0.2rem rgba(47,164,231,0.25)}.page-link:not(:disabled):not(.disabled){cursor:pointer}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:0.25rem;border-bottom-left-radius:0.25rem}.page-item:last-child 
.page-link{border-top-right-radius:0.25rem;border-bottom-right-radius:0.25rem}.page-item.active .page-link{z-index:1;color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.page-item.disabled .page-link{color:#868e96;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:0.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:0.3rem;border-bottom-left-radius:0.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:0.3rem;border-bottom-right-radius:0.3rem}.pagination-sm .page-link{padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:0.2rem;border-bottom-left-radius:0.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:0.2rem;border-bottom-right-radius:0.2rem}.badge{display:inline-block;padding:0.25em 0.4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:0.25rem}.badge:empty{display:none}.btn 
.badge{position:relative;top:-1px}.badge-pill{padding-right:0.6em;padding-left:0.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#2FA4E7}.badge-primary[href]:hover,.badge-primary[href]:focus{color:#fff;text-decoration:none;background-color:#178acc}.badge-secondary{color:#212529;background-color:#e9ecef}.badge-secondary[href]:hover,.badge-secondary[href]:focus{color:#212529;text-decoration:none;background-color:#cbd3da}.badge-success{color:#fff;background-color:#73A839}.badge-success[href]:hover,.badge-success[href]:focus{color:#fff;text-decoration:none;background-color:#59822c}.badge-info{color:#fff;background-color:#033C73}.badge-info[href]:hover,.badge-info[href]:focus{color:#fff;text-decoration:none;background-color:#022241}.badge-warning{color:#fff;background-color:#DD5600}.badge-warning[href]:hover,.badge-warning[href]:focus{color:#fff;text-decoration:none;background-color:#aa4200}.badge-danger{color:#fff;background-color:#C71C22}.badge-danger[href]:hover,.badge-danger[href]:focus{color:#fff;text-decoration:none;background-color:#9a161a}.badge-light{color:#212529;background-color:#f8f9fa}.badge-light[href]:hover,.badge-light[href]:focus{color:#212529;text-decoration:none;background-color:#dae0e5}.badge-dark{color:#fff;background-color:#343a40}.badge-dark[href]:hover,.badge-dark[href]:focus{color:#fff;text-decoration:none;background-color:#1d2124}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:0.3rem}@media (min-width: 576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:0.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:0.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:0.75rem 
1.25rem;color:inherit}.alert-primary{color:#185578;background-color:#d5edfa;border-color:#c5e6f8}.alert-primary hr{border-top-color:#aedcf5}.alert-primary .alert-link{color:#10374e}.alert-secondary{color:#797b7c;background-color:#fbfbfc;border-color:#f9fafb}.alert-secondary hr{border-top-color:#eaedf1}.alert-secondary .alert-link{color:#606162}.alert-success{color:#3c571e;background-color:#e3eed7;border-color:#d8e7c8}.alert-success hr{border-top-color:#cbdfb6}.alert-success .alert-link{color:#223111}.alert-info{color:#021f3c;background-color:#cdd8e3;border-color:#b8c8d8}.alert-info hr{border-top-color:#a8bbcf}.alert-info .alert-link{color:#00060b}.alert-warning{color:#732d00;background-color:#f8ddcc;border-color:#f5d0b8}.alert-warning hr{border-top-color:#f2c1a2}.alert-warning .alert-link{color:#401900}.alert-danger{color:#670f12;background-color:#f4d2d3;border-color:#efbfc1}.alert-danger hr{border-top-color:#eaabad}.alert-danger .alert-link{color:#3a090a}.alert-light{color:#818182;background-color:#fefefe;border-color:#fdfdfe}.alert-light hr{border-top-color:#ececf6}.alert-light .alert-link{color:#686868}.alert-dark{color:#1b1e21;background-color:#d6d8d9;border-color:#c6c8ca}.alert-dark hr{border-top-color:#b9bbbe}.alert-dark .alert-link{color:#040505}@-webkit-keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}.progress{display:-webkit-box;display:-ms-flexbox;display:flex;height:1rem;overflow:hidden;font-size:0.75rem;background-color:#e9ecef;border-radius:0.25rem}.progress-bar{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#2FA4E7;-webkit-transition:width 0.6s ease;transition:width 0.6s 
ease}@media screen and (prefers-reduced-motion: reduce){.progress-bar{-webkit-transition:none;transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}.media{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start}.media-body{-webkit-box-flex:1;-ms-flex:1;flex:1}.list-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#495057;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:0.75rem 1.25rem;margin-bottom:-1px;background-color:#fff;border:1px solid rgba(0,0,0,0.125)}.list-group-item:first-child{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.list-group-item:hover,.list-group-item:focus{z-index:1;text-decoration:none}.list-group-item.disabled,.list-group-item:disabled{color:#868e96;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#2FA4E7;border-color:#2FA4E7}.list-group-flush .list-group-item{border-right:0;border-left:0;border-radius:0}.list-group-flush:first-child .list-group-item:first-child{border-top:0}.list-group-flush:last-child 
.list-group-item:last-child{border-bottom:0}.list-group-item-primary{color:#185578;background-color:#c5e6f8}.list-group-item-primary.list-group-item-action:hover,.list-group-item-primary.list-group-item-action:focus{color:#185578;background-color:#aedcf5}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#185578;border-color:#185578}.list-group-item-secondary{color:#797b7c;background-color:#f9fafb}.list-group-item-secondary.list-group-item-action:hover,.list-group-item-secondary.list-group-item-action:focus{color:#797b7c;background-color:#eaedf1}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#797b7c;border-color:#797b7c}.list-group-item-success{color:#3c571e;background-color:#d8e7c8}.list-group-item-success.list-group-item-action:hover,.list-group-item-success.list-group-item-action:focus{color:#3c571e;background-color:#cbdfb6}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#3c571e;border-color:#3c571e}.list-group-item-info{color:#021f3c;background-color:#b8c8d8}.list-group-item-info.list-group-item-action:hover,.list-group-item-info.list-group-item-action:focus{color:#021f3c;background-color:#a8bbcf}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#021f3c;border-color:#021f3c}.list-group-item-warning{color:#732d00;background-color:#f5d0b8}.list-group-item-warning.list-group-item-action:hover,.list-group-item-warning.list-group-item-action:focus{color:#732d00;background-color:#f2c1a2}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#732d00;border-color:#732d00}.list-group-item-danger{color:#670f12;background-color:#efbfc1}.list-group-item-danger.list-group-item-action:hover,.list-group-item-danger.list-group-item-action:focus{color:#670f12;background-color:#eaabad}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#670f12;border-color:#670f12}.list-group-item-light{col
or:#818182;background-color:#fdfdfe}.list-group-item-light.list-group-item-action:hover,.list-group-item-light.list-group-item-action:focus{color:#818182;background-color:#ececf6}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#818182;border-color:#818182}.list-group-item-dark{color:#1b1e21;background-color:#c6c8ca}.list-group-item-dark.list-group-item-action:hover,.list-group-item-dark.list-group-item-action:focus{color:#1b1e21;background-color:#b9bbbe}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#1b1e21;border-color:#1b1e21}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.5}.close:not(:disabled):not(.disabled){cursor:pointer}.close:not(:disabled):not(.disabled):hover,.close:not(:disabled):not(.disabled):focus{color:#000;text-decoration:none;opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:0.5rem;pointer-events:none}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform 0.3s ease-out;transition:-webkit-transform 0.3s ease-out;transition:transform 0.3s ease-out;transition:transform 0.3s ease-out, -webkit-transform 0.3s ease-out;-webkit-transform:translate(0, -25%);transform:translate(0, -25%)}@media screen and (prefers-reduced-motion: reduce){.modal.fade .modal-dialog{-webkit-transition:none;transition:none}}.modal.show .modal-dialog{-webkit-transform:translate(0, 0);transform:translate(0, 0)}.modal-dialog-centered{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;min-height:calc(100% - (0.5rem * 2))}.modal-dialog-centered::before{display:block;height:calc(100vh - (0.5rem * 
2));content:""}.modal-content{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:0.3rem;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:0.5}.modal-header{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:1rem;border-bottom:1px solid #e9ecef;border-top-left-radius:0.3rem;border-top-right-radius:0.3rem}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem}.modal-footer{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end;padding:1rem;border-top:1px solid #e9ecef}.modal-footer>:not(:first-child){margin-left:.25rem}.modal-footer>:not(:last-child){margin-right:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width: 576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-centered{min-height:calc(100% - (1.75rem * 2))}.modal-dialog-centered::before{height:calc(100vh - (1.75rem * 2))}.modal-sm{max-width:300px}}@media (min-width: 992px){.modal-lg{max-width:800px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color 
Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:0.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:0.9}.tooltip .arrow{position:absolute;display:block;width:0.8rem;height:0.4rem}.tooltip .arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-top,.bs-tooltip-auto[x-placement^="top"]{padding:0.4rem 0}.bs-tooltip-top .arrow,.bs-tooltip-auto[x-placement^="top"] .arrow{bottom:0}.bs-tooltip-top .arrow::before,.bs-tooltip-auto[x-placement^="top"] .arrow::before{top:0;border-width:0.4rem 0.4rem 0;border-top-color:#000}.bs-tooltip-right,.bs-tooltip-auto[x-placement^="right"]{padding:0 0.4rem}.bs-tooltip-right .arrow,.bs-tooltip-auto[x-placement^="right"] .arrow{left:0;width:0.4rem;height:0.8rem}.bs-tooltip-right .arrow::before,.bs-tooltip-auto[x-placement^="right"] .arrow::before{right:0;border-width:0.4rem 0.4rem 0.4rem 0;border-right-color:#000}.bs-tooltip-bottom,.bs-tooltip-auto[x-placement^="bottom"]{padding:0.4rem 0}.bs-tooltip-bottom .arrow,.bs-tooltip-auto[x-placement^="bottom"] .arrow{top:0}.bs-tooltip-bottom .arrow::before,.bs-tooltip-auto[x-placement^="bottom"] .arrow::before{bottom:0;border-width:0 0.4rem 0.4rem;border-bottom-color:#000}.bs-tooltip-left,.bs-tooltip-auto[x-placement^="left"]{padding:0 0.4rem}.bs-tooltip-left .arrow,.bs-tooltip-auto[x-placement^="left"] .arrow{right:0;width:0.4rem;height:0.8rem}.bs-tooltip-left .arrow::before,.bs-tooltip-auto[x-placement^="left"] .arrow::before{left:0;border-width:0.4rem 0 0.4rem 0.4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:0.25rem 0.5rem;color:#fff;text-align:center;background-color:#000;border-radius:0.25rem}.popover{position:absolute;top:0;left:0;z-index:1060;display:block;max-width:276px;font-family:-apple-system, 
BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:0.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:0.3rem}.popover .arrow{position:absolute;display:block;width:1rem;height:0.5rem;margin:0 0.3rem}.popover .arrow::before,.popover .arrow::after{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-top,.bs-popover-auto[x-placement^="top"]{margin-bottom:0.5rem}.bs-popover-top .arrow,.bs-popover-auto[x-placement^="top"] .arrow{bottom:calc((0.5rem + 1px) * -1)}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before,.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{border-width:0.5rem 0.5rem 0}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before{bottom:0;border-top-color:rgba(0,0,0,0.25)}.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{bottom:1px;border-top-color:#fff}.bs-popover-right,.bs-popover-auto[x-placement^="right"]{margin-left:0.5rem}.bs-popover-right .arrow,.bs-popover-auto[x-placement^="right"] .arrow{left:calc((0.5rem + 1px) * -1);width:0.5rem;height:1rem;margin:0.3rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before,.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] .arrow::after{border-width:0.5rem 0.5rem 0.5rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before{left:0;border-right-color:rgba(0,0,0,0.25)}.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] 
.arrow::after{left:1px;border-right-color:#fff}.bs-popover-bottom,.bs-popover-auto[x-placement^="bottom"]{margin-top:0.5rem}.bs-popover-bottom .arrow,.bs-popover-auto[x-placement^="bottom"] .arrow{top:calc((0.5rem + 1px) * -1)}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before,.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{border-width:0 0.5rem 0.5rem 0.5rem}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before{top:0;border-bottom-color:rgba(0,0,0,0.25)}.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{top:1px;border-bottom-color:#fff}.bs-popover-bottom .popover-header::before,.bs-popover-auto[x-placement^="bottom"] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-0.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-left,.bs-popover-auto[x-placement^="left"]{margin-right:0.5rem}.bs-popover-left .arrow,.bs-popover-auto[x-placement^="left"] .arrow{right:calc((0.5rem + 1px) * -1);width:0.5rem;height:1rem;margin:0.3rem 0}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before,.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{border-width:0.5rem 0 0.5rem 0.5rem}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before{right:0;border-left-color:rgba(0,0,0,0.25)}.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{right:1px;border-left-color:#fff}.popover-header{padding:0.5rem 0.75rem;margin-bottom:0;font-size:1rem;color:#2FA4E7;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(0.3rem - 1px);border-top-right-radius:calc(0.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:0.5rem 
0.75rem;color:#495057}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-item{position:relative;display:none;-webkit-box-align:center;-ms-flex-align:center;align-items:center;width:100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block;-webkit-transition:-webkit-transform 0.6s ease;transition:-webkit-transform 0.6s ease;transition:transform 0.6s ease;transition:transform 0.6s ease, -webkit-transform 0.6s ease}@media screen and (prefers-reduced-motion: reduce){.carousel-item.active,.carousel-item-next,.carousel-item-prev{-webkit-transition:none;transition:none}}.carousel-item-next,.carousel-item-prev{position:absolute;top:0}.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translateX(0);transform:translateX(0)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}}.carousel-item-next,.active.carousel-item-right{-webkit-transform:translateX(100%);transform:translateX(100%)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-next,.active.carousel-item-right{-webkit-transform:translate3d(100%, 0, 0);transform:translate3d(100%, 0, 0)}}.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translateX(-100%);transform:translateX(-100%)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translate3d(-100%, 0, 0);transform:translate3d(-100%, 0, 0)}}.carousel-fade .carousel-item{opacity:0;-webkit-transition-duration:.6s;transition-duration:.6s;-webkit-transition-property:opacity;transition-property:opacity}.carousel-fade 
.carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right{opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{opacity:0}.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade .carousel-item.active,.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev{-webkit-transform:translateX(0);transform:translateX(0)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade .carousel-item.active,.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:0.5}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:20px;height:20px;background:transparent no-repeat center center;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 
4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:10px;left:0;z-index:15;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:rgba(255,255,255,0.5)}.carousel-indicators li::before{position:absolute;top:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators li::after{position:absolute;bottom:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators .active{background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.bg-primary{background-color:#2FA4E7 !important}a.bg-primary:hover,a.bg-primary:focus,button.bg-primary:hover,button.bg-primary:focus{background-color:#178acc !important}.bg-secondary{background-color:#e9ecef !important}a.bg-secondary:hover,a.bg-secondary:focus,button.bg-secondary:hover,button.bg-secondary:focus{background-color:#cbd3da !important}.bg-success{background-color:#73A839 !important}a.bg-success:hover,a.bg-success:focus,button.bg-success:hover,button.bg-success:focus{background-color:#59822c !important}.bg-info{background-color:#033C73 !important}a.bg-info:hover,a.bg-info:focus,button.bg-info:hover,button.bg-info:focus{background-color:#022241 !important}.bg-warning{background-color:#DD5600 
!important}a.bg-warning:hover,a.bg-warning:focus,button.bg-warning:hover,button.bg-warning:focus{background-color:#aa4200 !important}.bg-danger{background-color:#C71C22 !important}a.bg-danger:hover,a.bg-danger:focus,button.bg-danger:hover,button.bg-danger:focus{background-color:#9a161a !important}.bg-light{background-color:#f8f9fa !important}a.bg-light:hover,a.bg-light:focus,button.bg-light:hover,button.bg-light:focus{background-color:#dae0e5 !important}.bg-dark{background-color:#343a40 !important}a.bg-dark:hover,a.bg-dark:focus,button.bg-dark:hover,button.bg-dark:focus{background-color:#1d2124 !important}.bg-white{background-color:#fff !important}.bg-transparent{background-color:transparent !important}.border{border:1px solid #dee2e6 !important}.border-top{border-top:1px solid #dee2e6 !important}.border-right{border-right:1px solid #dee2e6 !important}.border-bottom{border-bottom:1px solid #dee2e6 !important}.border-left{border-left:1px solid #dee2e6 !important}.border-0{border:0 !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.border-primary{border-color:#2FA4E7 !important}.border-secondary{border-color:#e9ecef !important}.border-success{border-color:#73A839 !important}.border-info{border-color:#033C73 !important}.border-warning{border-color:#DD5600 !important}.border-danger{border-color:#C71C22 !important}.border-light{border-color:#f8f9fa !important}.border-dark{border-color:#343a40 !important}.border-white{border-color:#fff !important}.rounded{border-radius:0.25rem !important}.rounded-top{border-top-left-radius:0.25rem !important;border-top-right-radius:0.25rem !important}.rounded-right{border-top-right-radius:0.25rem !important;border-bottom-right-radius:0.25rem !important}.rounded-bottom{border-bottom-right-radius:0.25rem !important;border-bottom-left-radius:0.25rem !important}.rounded-left{border-top-left-radius:0.25rem 
!important;border-bottom-left-radius:0.25rem !important}.rounded-circle{border-radius:50% !important}.rounded-0{border-radius:0 !important}.clearfix::after{display:block;clear:both;content:""}.d-none{display:none !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}@media (min-width: 576px){.d-sm-none{display:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-sm-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 768px){.d-md-none{display:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-md-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 992px){.d-lg-none{display:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-table{display:table 
!important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-lg-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 1200px){.d-xl-none{display:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-xl-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media print{.d-print-none{display:none !important}.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-print-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive::before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object,.embed-responsive 
video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9::before{padding-top:42.8571428571%}.embed-responsive-16by9::before{padding-top:56.25%}.embed-responsive-4by3::before{padding-top:75%}.embed-responsive-1by1::before{padding-top:100%}.flex-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center 
!important}.justify-content-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}@media (min-width: 576px){.flex-sm-row{-webkit-box-orient:horizontal 
!important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-sm-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-sm-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-sm-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-sm-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-sm-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-sm-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-sm-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-sm-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-sm-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-sm-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-sm-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-sm-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-sm-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-sm-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-sm-around{-ms-flex-pack:distribute !important;justify-content:space-around 
!important}.align-items-sm-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-sm-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-sm-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-sm-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-sm-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-sm-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-sm-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-sm-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-sm-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-sm-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-sm-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-sm-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-sm-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-sm-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-sm-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-sm-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-sm-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 768px){.flex-md-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-md-column{-webkit-box-orient:vertical 
!important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-md-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-md-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-md-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-md-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-md-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-md-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-md-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-md-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-md-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-md-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-md-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-md-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-md-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-md-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-md-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-md-end{-webkit-box-align:end !important;-ms-flex-align:end 
!important;align-items:flex-end !important}.align-items-md-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-md-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-md-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-md-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-md-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-md-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-md-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-md-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-md-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-md-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-md-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-md-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-md-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-md-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-md-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 992px){.flex-lg-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-lg-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-lg-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse 
!important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-lg-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-lg-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-lg-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-lg-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-lg-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-lg-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-lg-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-lg-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-lg-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-lg-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-lg-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-lg-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-lg-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-lg-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-lg-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-lg-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-lg-baseline{-webkit-box-align:baseline 
!important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-lg-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-lg-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-lg-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-lg-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-lg-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-lg-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-lg-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-lg-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-lg-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-lg-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-lg-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-lg-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-lg-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 1200px){.flex-xl-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-xl-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-xl-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-xl-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse 
!important;flex-direction:column-reverse !important}.flex-xl-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-xl-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-xl-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-xl-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-xl-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-xl-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-xl-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-xl-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-xl-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-xl-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-xl-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-xl-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-xl-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-xl-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-xl-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-xl-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-xl-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch 
!important}.align-content-xl-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-xl-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-xl-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-xl-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-xl-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-xl-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-xl-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-xl-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-xl-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-xl-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-xl-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-xl-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}.float-left{float:left !important}.float-right{float:right !important}.float-none{float:none !important}@media (min-width: 576px){.float-sm-left{float:left !important}.float-sm-right{float:right !important}.float-sm-none{float:none !important}}@media (min-width: 768px){.float-md-left{float:left !important}.float-md-right{float:right !important}.float-md-none{float:none !important}}@media (min-width: 992px){.float-lg-left{float:left !important}.float-lg-right{float:right !important}.float-lg-none{float:none !important}}@media (min-width: 1200px){.float-xl-left{float:left !important}.float-xl-right{float:right !important}.float-xl-none{float:none !important}}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed 
!important}.position-sticky{position:-webkit-sticky !important;position:sticky !important}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}@supports (position: -webkit-sticky) or (position: sticky){.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{-webkit-box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important;box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important}.shadow{-webkit-box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important;box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important}.shadow-lg{-webkit-box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important;box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important}.shadow-none{-webkit-box-shadow:none !important;box-shadow:none !important}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mw-100{max-width:100% !important}.mh-100{max-height:100% !important}.m-0{margin:0 !important}.mt-0,.my-0{margin-top:0 !important}.mr-0,.mx-0{margin-right:0 !important}.mb-0,.my-0{margin-bottom:0 !important}.ml-0,.mx-0{margin-left:0 !important}.m-1{margin:0.25rem !important}.mt-1,.my-1{margin-top:0.25rem !important}.mr-1,.mx-1{margin-right:0.25rem !important}.mb-1,.my-1{margin-bottom:0.25rem !important}.ml-1,.mx-1{margin-left:0.25rem !important}.m-2{margin:0.5rem !important}.mt-2,.my-2{margin-top:0.5rem !important}.mr-2,.mx-2{margin-right:0.5rem !important}.mb-2,.my-2{margin-bottom:0.5rem !important}.ml-2,.mx-2{margin-left:0.5rem 
!important}.m-3{margin:1rem !important}.mt-3,.my-3{margin-top:1rem !important}.mr-3,.mx-3{margin-right:1rem !important}.mb-3,.my-3{margin-bottom:1rem !important}.ml-3,.mx-3{margin-left:1rem !important}.m-4{margin:1.5rem !important}.mt-4,.my-4{margin-top:1.5rem !important}.mr-4,.mx-4{margin-right:1.5rem !important}.mb-4,.my-4{margin-bottom:1.5rem !important}.ml-4,.mx-4{margin-left:1.5rem !important}.m-5{margin:3rem !important}.mt-5,.my-5{margin-top:3rem !important}.mr-5,.mx-5{margin-right:3rem !important}.mb-5,.my-5{margin-bottom:3rem !important}.ml-5,.mx-5{margin-left:3rem !important}.p-0{padding:0 !important}.pt-0,.py-0{padding-top:0 !important}.pr-0,.px-0{padding-right:0 !important}.pb-0,.py-0{padding-bottom:0 !important}.pl-0,.px-0{padding-left:0 !important}.p-1{padding:0.25rem !important}.pt-1,.py-1{padding-top:0.25rem !important}.pr-1,.px-1{padding-right:0.25rem !important}.pb-1,.py-1{padding-bottom:0.25rem !important}.pl-1,.px-1{padding-left:0.25rem !important}.p-2{padding:0.5rem !important}.pt-2,.py-2{padding-top:0.5rem !important}.pr-2,.px-2{padding-right:0.5rem !important}.pb-2,.py-2{padding-bottom:0.5rem !important}.pl-2,.px-2{padding-left:0.5rem !important}.p-3{padding:1rem !important}.pt-3,.py-3{padding-top:1rem !important}.pr-3,.px-3{padding-right:1rem !important}.pb-3,.py-3{padding-bottom:1rem !important}.pl-3,.px-3{padding-left:1rem !important}.p-4{padding:1.5rem !important}.pt-4,.py-4{padding-top:1.5rem !important}.pr-4,.px-4{padding-right:1.5rem !important}.pb-4,.py-4{padding-bottom:1.5rem !important}.pl-4,.px-4{padding-left:1.5rem !important}.p-5{padding:3rem !important}.pt-5,.py-5{padding-top:3rem !important}.pr-5,.px-5{padding-right:3rem !important}.pb-5,.py-5{padding-bottom:3rem !important}.pl-5,.px-5{padding-left:3rem !important}.m-auto{margin:auto !important}.mt-auto,.my-auto{margin-top:auto !important}.mr-auto,.mx-auto{margin-right:auto !important}.mb-auto,.my-auto{margin-bottom:auto !important}.ml-auto,.mx-auto{margin-left:auto 
!important}@media (min-width: 576px){.m-sm-0{margin:0 !important}.mt-sm-0,.my-sm-0{margin-top:0 !important}.mr-sm-0,.mx-sm-0{margin-right:0 !important}.mb-sm-0,.my-sm-0{margin-bottom:0 !important}.ml-sm-0,.mx-sm-0{margin-left:0 !important}.m-sm-1{margin:0.25rem !important}.mt-sm-1,.my-sm-1{margin-top:0.25rem !important}.mr-sm-1,.mx-sm-1{margin-right:0.25rem !important}.mb-sm-1,.my-sm-1{margin-bottom:0.25rem !important}.ml-sm-1,.mx-sm-1{margin-left:0.25rem !important}.m-sm-2{margin:0.5rem !important}.mt-sm-2,.my-sm-2{margin-top:0.5rem !important}.mr-sm-2,.mx-sm-2{margin-right:0.5rem !important}.mb-sm-2,.my-sm-2{margin-bottom:0.5rem !important}.ml-sm-2,.mx-sm-2{margin-left:0.5rem !important}.m-sm-3{margin:1rem !important}.mt-sm-3,.my-sm-3{margin-top:1rem !important}.mr-sm-3,.mx-sm-3{margin-right:1rem !important}.mb-sm-3,.my-sm-3{margin-bottom:1rem !important}.ml-sm-3,.mx-sm-3{margin-left:1rem !important}.m-sm-4{margin:1.5rem !important}.mt-sm-4,.my-sm-4{margin-top:1.5rem !important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem !important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem !important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem !important}.m-sm-5{margin:3rem !important}.mt-sm-5,.my-sm-5{margin-top:3rem !important}.mr-sm-5,.mx-sm-5{margin-right:3rem !important}.mb-sm-5,.my-sm-5{margin-bottom:3rem !important}.ml-sm-5,.mx-sm-5{margin-left:3rem !important}.p-sm-0{padding:0 !important}.pt-sm-0,.py-sm-0{padding-top:0 !important}.pr-sm-0,.px-sm-0{padding-right:0 !important}.pb-sm-0,.py-sm-0{padding-bottom:0 !important}.pl-sm-0,.px-sm-0{padding-left:0 !important}.p-sm-1{padding:0.25rem !important}.pt-sm-1,.py-sm-1{padding-top:0.25rem !important}.pr-sm-1,.px-sm-1{padding-right:0.25rem !important}.pb-sm-1,.py-sm-1{padding-bottom:0.25rem !important}.pl-sm-1,.px-sm-1{padding-left:0.25rem !important}.p-sm-2{padding:0.5rem !important}.pt-sm-2,.py-sm-2{padding-top:0.5rem !important}.pr-sm-2,.px-sm-2{padding-right:0.5rem !important}.pb-sm-2,.py-sm-2{padding-bottom:0.5rem 
!important}.pl-sm-2,.px-sm-2{padding-left:0.5rem !important}.p-sm-3{padding:1rem !important}.pt-sm-3,.py-sm-3{padding-top:1rem !important}.pr-sm-3,.px-sm-3{padding-right:1rem !important}.pb-sm-3,.py-sm-3{padding-bottom:1rem !important}.pl-sm-3,.px-sm-3{padding-left:1rem !important}.p-sm-4{padding:1.5rem !important}.pt-sm-4,.py-sm-4{padding-top:1.5rem !important}.pr-sm-4,.px-sm-4{padding-right:1.5rem !important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem !important}.pl-sm-4,.px-sm-4{padding-left:1.5rem !important}.p-sm-5{padding:3rem !important}.pt-sm-5,.py-sm-5{padding-top:3rem !important}.pr-sm-5,.px-sm-5{padding-right:3rem !important}.pb-sm-5,.py-sm-5{padding-bottom:3rem !important}.pl-sm-5,.px-sm-5{padding-left:3rem !important}.m-sm-auto{margin:auto !important}.mt-sm-auto,.my-sm-auto{margin-top:auto !important}.mr-sm-auto,.mx-sm-auto{margin-right:auto !important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto !important}.ml-sm-auto,.mx-sm-auto{margin-left:auto !important}}@media (min-width: 768px){.m-md-0{margin:0 !important}.mt-md-0,.my-md-0{margin-top:0 !important}.mr-md-0,.mx-md-0{margin-right:0 !important}.mb-md-0,.my-md-0{margin-bottom:0 !important}.ml-md-0,.mx-md-0{margin-left:0 !important}.m-md-1{margin:0.25rem !important}.mt-md-1,.my-md-1{margin-top:0.25rem !important}.mr-md-1,.mx-md-1{margin-right:0.25rem !important}.mb-md-1,.my-md-1{margin-bottom:0.25rem !important}.ml-md-1,.mx-md-1{margin-left:0.25rem !important}.m-md-2{margin:0.5rem !important}.mt-md-2,.my-md-2{margin-top:0.5rem !important}.mr-md-2,.mx-md-2{margin-right:0.5rem !important}.mb-md-2,.my-md-2{margin-bottom:0.5rem !important}.ml-md-2,.mx-md-2{margin-left:0.5rem !important}.m-md-3{margin:1rem !important}.mt-md-3,.my-md-3{margin-top:1rem !important}.mr-md-3,.mx-md-3{margin-right:1rem !important}.mb-md-3,.my-md-3{margin-bottom:1rem !important}.ml-md-3,.mx-md-3{margin-left:1rem !important}.m-md-4{margin:1.5rem !important}.mt-md-4,.my-md-4{margin-top:1.5rem 
!important}.mr-md-4,.mx-md-4{margin-right:1.5rem !important}.mb-md-4,.my-md-4{margin-bottom:1.5rem !important}.ml-md-4,.mx-md-4{margin-left:1.5rem !important}.m-md-5{margin:3rem !important}.mt-md-5,.my-md-5{margin-top:3rem !important}.mr-md-5,.mx-md-5{margin-right:3rem !important}.mb-md-5,.my-md-5{margin-bottom:3rem !important}.ml-md-5,.mx-md-5{margin-left:3rem !important}.p-md-0{padding:0 !important}.pt-md-0,.py-md-0{padding-top:0 !important}.pr-md-0,.px-md-0{padding-right:0 !important}.pb-md-0,.py-md-0{padding-bottom:0 !important}.pl-md-0,.px-md-0{padding-left:0 !important}.p-md-1{padding:0.25rem !important}.pt-md-1,.py-md-1{padding-top:0.25rem !important}.pr-md-1,.px-md-1{padding-right:0.25rem !important}.pb-md-1,.py-md-1{padding-bottom:0.25rem !important}.pl-md-1,.px-md-1{padding-left:0.25rem !important}.p-md-2{padding:0.5rem !important}.pt-md-2,.py-md-2{padding-top:0.5rem !important}.pr-md-2,.px-md-2{padding-right:0.5rem !important}.pb-md-2,.py-md-2{padding-bottom:0.5rem !important}.pl-md-2,.px-md-2{padding-left:0.5rem !important}.p-md-3{padding:1rem !important}.pt-md-3,.py-md-3{padding-top:1rem !important}.pr-md-3,.px-md-3{padding-right:1rem !important}.pb-md-3,.py-md-3{padding-bottom:1rem !important}.pl-md-3,.px-md-3{padding-left:1rem !important}.p-md-4{padding:1.5rem !important}.pt-md-4,.py-md-4{padding-top:1.5rem !important}.pr-md-4,.px-md-4{padding-right:1.5rem !important}.pb-md-4,.py-md-4{padding-bottom:1.5rem !important}.pl-md-4,.px-md-4{padding-left:1.5rem !important}.p-md-5{padding:3rem !important}.pt-md-5,.py-md-5{padding-top:3rem !important}.pr-md-5,.px-md-5{padding-right:3rem !important}.pb-md-5,.py-md-5{padding-bottom:3rem !important}.pl-md-5,.px-md-5{padding-left:3rem !important}.m-md-auto{margin:auto !important}.mt-md-auto,.my-md-auto{margin-top:auto !important}.mr-md-auto,.mx-md-auto{margin-right:auto !important}.mb-md-auto,.my-md-auto{margin-bottom:auto !important}.ml-md-auto,.mx-md-auto{margin-left:auto !important}}@media (min-width: 
992px){.m-lg-0{margin:0 !important}.mt-lg-0,.my-lg-0{margin-top:0 !important}.mr-lg-0,.mx-lg-0{margin-right:0 !important}.mb-lg-0,.my-lg-0{margin-bottom:0 !important}.ml-lg-0,.mx-lg-0{margin-left:0 !important}.m-lg-1{margin:0.25rem !important}.mt-lg-1,.my-lg-1{margin-top:0.25rem !important}.mr-lg-1,.mx-lg-1{margin-right:0.25rem !important}.mb-lg-1,.my-lg-1{margin-bottom:0.25rem !important}.ml-lg-1,.mx-lg-1{margin-left:0.25rem !important}.m-lg-2{margin:0.5rem !important}.mt-lg-2,.my-lg-2{margin-top:0.5rem !important}.mr-lg-2,.mx-lg-2{margin-right:0.5rem !important}.mb-lg-2,.my-lg-2{margin-bottom:0.5rem !important}.ml-lg-2,.mx-lg-2{margin-left:0.5rem !important}.m-lg-3{margin:1rem !important}.mt-lg-3,.my-lg-3{margin-top:1rem !important}.mr-lg-3,.mx-lg-3{margin-right:1rem !important}.mb-lg-3,.my-lg-3{margin-bottom:1rem !important}.ml-lg-3,.mx-lg-3{margin-left:1rem !important}.m-lg-4{margin:1.5rem !important}.mt-lg-4,.my-lg-4{margin-top:1.5rem !important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem !important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem !important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem !important}.m-lg-5{margin:3rem !important}.mt-lg-5,.my-lg-5{margin-top:3rem !important}.mr-lg-5,.mx-lg-5{margin-right:3rem !important}.mb-lg-5,.my-lg-5{margin-bottom:3rem !important}.ml-lg-5,.mx-lg-5{margin-left:3rem !important}.p-lg-0{padding:0 !important}.pt-lg-0,.py-lg-0{padding-top:0 !important}.pr-lg-0,.px-lg-0{padding-right:0 !important}.pb-lg-0,.py-lg-0{padding-bottom:0 !important}.pl-lg-0,.px-lg-0{padding-left:0 !important}.p-lg-1{padding:0.25rem !important}.pt-lg-1,.py-lg-1{padding-top:0.25rem !important}.pr-lg-1,.px-lg-1{padding-right:0.25rem !important}.pb-lg-1,.py-lg-1{padding-bottom:0.25rem !important}.pl-lg-1,.px-lg-1{padding-left:0.25rem !important}.p-lg-2{padding:0.5rem !important}.pt-lg-2,.py-lg-2{padding-top:0.5rem !important}.pr-lg-2,.px-lg-2{padding-right:0.5rem !important}.pb-lg-2,.py-lg-2{padding-bottom:0.5rem !important}.pl-lg-2,.px-lg-2{padding-left:0.5rem 
!important}.p-lg-3{padding:1rem !important}.pt-lg-3,.py-lg-3{padding-top:1rem !important}.pr-lg-3,.px-lg-3{padding-right:1rem !important}.pb-lg-3,.py-lg-3{padding-bottom:1rem !important}.pl-lg-3,.px-lg-3{padding-left:1rem !important}.p-lg-4{padding:1.5rem !important}.pt-lg-4,.py-lg-4{padding-top:1.5rem !important}.pr-lg-4,.px-lg-4{padding-right:1.5rem !important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem !important}.pl-lg-4,.px-lg-4{padding-left:1.5rem !important}.p-lg-5{padding:3rem !important}.pt-lg-5,.py-lg-5{padding-top:3rem !important}.pr-lg-5,.px-lg-5{padding-right:3rem !important}.pb-lg-5,.py-lg-5{padding-bottom:3rem !important}.pl-lg-5,.px-lg-5{padding-left:3rem !important}.m-lg-auto{margin:auto !important}.mt-lg-auto,.my-lg-auto{margin-top:auto !important}.mr-lg-auto,.mx-lg-auto{margin-right:auto !important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto !important}.ml-lg-auto,.mx-lg-auto{margin-left:auto !important}}@media (min-width: 1200px){.m-xl-0{margin:0 !important}.mt-xl-0,.my-xl-0{margin-top:0 !important}.mr-xl-0,.mx-xl-0{margin-right:0 !important}.mb-xl-0,.my-xl-0{margin-bottom:0 !important}.ml-xl-0,.mx-xl-0{margin-left:0 !important}.m-xl-1{margin:0.25rem !important}.mt-xl-1,.my-xl-1{margin-top:0.25rem !important}.mr-xl-1,.mx-xl-1{margin-right:0.25rem !important}.mb-xl-1,.my-xl-1{margin-bottom:0.25rem !important}.ml-xl-1,.mx-xl-1{margin-left:0.25rem !important}.m-xl-2{margin:0.5rem !important}.mt-xl-2,.my-xl-2{margin-top:0.5rem !important}.mr-xl-2,.mx-xl-2{margin-right:0.5rem !important}.mb-xl-2,.my-xl-2{margin-bottom:0.5rem !important}.ml-xl-2,.mx-xl-2{margin-left:0.5rem !important}.m-xl-3{margin:1rem !important}.mt-xl-3,.my-xl-3{margin-top:1rem !important}.mr-xl-3,.mx-xl-3{margin-right:1rem !important}.mb-xl-3,.my-xl-3{margin-bottom:1rem !important}.ml-xl-3,.mx-xl-3{margin-left:1rem !important}.m-xl-4{margin:1.5rem !important}.mt-xl-4,.my-xl-4{margin-top:1.5rem !important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem 
!important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem !important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem !important}.m-xl-5{margin:3rem !important}.mt-xl-5,.my-xl-5{margin-top:3rem !important}.mr-xl-5,.mx-xl-5{margin-right:3rem !important}.mb-xl-5,.my-xl-5{margin-bottom:3rem !important}.ml-xl-5,.mx-xl-5{margin-left:3rem !important}.p-xl-0{padding:0 !important}.pt-xl-0,.py-xl-0{padding-top:0 !important}.pr-xl-0,.px-xl-0{padding-right:0 !important}.pb-xl-0,.py-xl-0{padding-bottom:0 !important}.pl-xl-0,.px-xl-0{padding-left:0 !important}.p-xl-1{padding:0.25rem !important}.pt-xl-1,.py-xl-1{padding-top:0.25rem !important}.pr-xl-1,.px-xl-1{padding-right:0.25rem !important}.pb-xl-1,.py-xl-1{padding-bottom:0.25rem !important}.pl-xl-1,.px-xl-1{padding-left:0.25rem !important}.p-xl-2{padding:0.5rem !important}.pt-xl-2,.py-xl-2{padding-top:0.5rem !important}.pr-xl-2,.px-xl-2{padding-right:0.5rem !important}.pb-xl-2,.py-xl-2{padding-bottom:0.5rem !important}.pl-xl-2,.px-xl-2{padding-left:0.5rem !important}.p-xl-3{padding:1rem !important}.pt-xl-3,.py-xl-3{padding-top:1rem !important}.pr-xl-3,.px-xl-3{padding-right:1rem !important}.pb-xl-3,.py-xl-3{padding-bottom:1rem !important}.pl-xl-3,.px-xl-3{padding-left:1rem !important}.p-xl-4{padding:1.5rem !important}.pt-xl-4,.py-xl-4{padding-top:1.5rem !important}.pr-xl-4,.px-xl-4{padding-right:1.5rem !important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem !important}.pl-xl-4,.px-xl-4{padding-left:1.5rem !important}.p-xl-5{padding:3rem !important}.pt-xl-5,.py-xl-5{padding-top:3rem !important}.pr-xl-5,.px-xl-5{padding-right:3rem !important}.pb-xl-5,.py-xl-5{padding-bottom:3rem !important}.pl-xl-5,.px-xl-5{padding-left:3rem !important}.m-xl-auto{margin:auto !important}.mt-xl-auto,.my-xl-auto{margin-top:auto !important}.mr-xl-auto,.mx-xl-auto{margin-right:auto !important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto !important}.ml-xl-auto,.mx-xl-auto{margin-left:auto !important}}.text-monospace{font-family:SFMono-Regular, Menlo, Monaco, Consolas, 
"Liberation Mono", "Courier New", monospace}.text-justify{text-align:justify !important}.text-nowrap{white-space:nowrap !important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left !important}.text-right{text-align:right !important}.text-center{text-align:center !important}@media (min-width: 576px){.text-sm-left{text-align:left !important}.text-sm-right{text-align:right !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.text-md-left{text-align:left !important}.text-md-right{text-align:right !important}.text-md-center{text-align:center !important}}@media (min-width: 992px){.text-lg-left{text-align:left !important}.text-lg-right{text-align:right !important}.text-lg-center{text-align:center !important}}@media (min-width: 1200px){.text-xl-left{text-align:left !important}.text-xl-right{text-align:right !important}.text-xl-center{text-align:center !important}}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.font-weight-light{font-weight:300 !important}.font-weight-normal{font-weight:400 !important}.font-weight-bold{font-weight:700 !important}.font-italic{font-style:italic !important}.text-white{color:#fff !important}.text-primary{color:#2FA4E7 !important}a.text-primary:hover,a.text-primary:focus{color:#178acc !important}.text-secondary{color:#e9ecef !important}a.text-secondary:hover,a.text-secondary:focus{color:#cbd3da !important}.text-success{color:#73A839 !important}a.text-success:hover,a.text-success:focus{color:#59822c !important}.text-info{color:#033C73 !important}a.text-info:hover,a.text-info:focus{color:#022241 !important}.text-warning{color:#DD5600 !important}a.text-warning:hover,a.text-warning:focus{color:#aa4200 !important}.text-danger{color:#C71C22 !important}a.text-danger:hover,a.text-danger:focus{color:#9a161a !important}.text-light{color:#f8f9fa 
!important}a.text-light:hover,a.text-light:focus{color:#dae0e5 !important}.text-dark{color:#343a40 !important}a.text-dark:hover,a.text-dark:focus{color:#1d2124 !important}.text-body{color:#495057 !important}.text-muted{color:#868e96 !important}.text-black-50{color:rgba(0,0,0,0.5) !important}.text-white-50{color:rgba(255,255,255,0.5) !important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.visible{visibility:visible !important}.invisible{visibility:hidden !important}@media print{*,*::before,*::after{text-shadow:none !important;-webkit-box-shadow:none !important;box-shadow:none !important}a:not(.btn){text-decoration:underline}abbr[title]::after{content:" (" attr(title) ")"}pre{white-space:pre-wrap !important}pre,blockquote{border:1px solid #adb5bd;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body{min-width:992px !important}.container{min-width:992px !important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse !important}.table td,.table th{background-color:#fff !important}.table-bordered th,.table-bordered td{border:1px solid #dee2e6 !important}.table-dark{color:inherit}.table-dark th,.table-dark td,.table-dark thead th,.table-dark tbody+tbody{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}}.bg-primary{background-image:-webkit-gradient(linear, left top, left bottom, from(#54b4eb), color-stop(60%, #2FA4E7), to(#1d9ce5));background-image:linear-gradient(#54b4eb, #2FA4E7 60%, #1d9ce5);background-repeat:no-repeat}.bg-dark{background-image:-webkit-gradient(linear, left top, left bottom, from(#04519b), color-stop(60%, #033C73), to(#02325f));background-image:linear-gradient(#04519b, #033C73 60%, #02325f);background-repeat:no-repeat}.bg-light{background-image:-webkit-gradient(linear, left top, left bottom, from(white), color-stop(60%, #e9ecef), 
to(#e3e7eb));background-image:linear-gradient(white, #e9ecef 60%, #e3e7eb);background-repeat:no-repeat}.navbar-brand,.nav-link{text-shadow:0 1px 0 rgba(0,0,0,0.05)}.btn{text-shadow:0 1px 0 rgba(0,0,0,0.05)}.btn-primary{background-image:-webkit-gradient(linear, left top, left bottom, from(#54b4eb), color-stop(60%, #2FA4E7), to(#1d9ce5));background-image:linear-gradient(#54b4eb, #2FA4E7 60%, #1d9ce5);background-repeat:no-repeat}.btn-secondary{background-image:-webkit-gradient(linear, left top, left bottom, from(white), color-stop(60%, #e9ecef), to(#dde2e6));background-image:linear-gradient(white, #e9ecef 60%, #dde2e6);background-repeat:no-repeat;color:#495057}.btn-success{background-image:-webkit-gradient(linear, left top, left bottom, from(#88c149), color-stop(60%, #73A839), to(#699934));background-image:linear-gradient(#88c149, #73A839 60%, #699934);background-repeat:no-repeat}.btn-info{background-image:-webkit-gradient(linear, left top, left bottom, from(#04519b), color-stop(60%, #033C73), to(#02325f));background-image:linear-gradient(#04519b, #033C73 60%, #02325f);background-repeat:no-repeat}.btn-warning{background-image:-webkit-gradient(linear, left top, left bottom, from(#ff6707), color-stop(60%, #DD5600), to(#c94e00));background-image:linear-gradient(#ff6707, #DD5600 60%, #c94e00);background-repeat:no-repeat}.btn-danger{background-image:-webkit-gradient(linear, left top, left bottom, from(#e12b31), color-stop(60%, #C71C22), to(#b5191f));background-image:linear-gradient(#e12b31, #C71C22 60%, #b5191f);background-repeat:no-repeat}.btn-light{background-image:-webkit-gradient(linear, left top, left bottom, from(white), color-stop(60%, #f8f9fa), to(#eceff2));background-image:linear-gradient(white, #f8f9fa 60%, #eceff2);background-repeat:no-repeat}.btn-dark{background-image:-webkit-gradient(linear, left top, left bottom, from(#464e57), color-stop(60%, #343a40), to(#2b3035));background-image:linear-gradient(#464e57, #343a40 60%, 
#2b3035);background-repeat:no-repeat}.bg-primary h1,.bg-primary h2,.bg-primary h3,.bg-primary h4,.bg-primary h5,.bg-primary h6,.bg-success h1,.bg-success h2,.bg-success h3,.bg-success h4,.bg-success h5,.bg-success h6,.bg-info h1,.bg-info h2,.bg-info h3,.bg-info h4,.bg-info h5,.bg-info h6,.bg-warning h1,.bg-warning h2,.bg-warning h3,.bg-warning h4,.bg-warning h5,.bg-warning h6,.bg-danger h1,.bg-danger h2,.bg-danger h3,.bg-danger h4,.bg-danger h5,.bg-danger h6,.bg-dark h1,.bg-dark h2,.bg-dark h3,.bg-dark h4,.bg-dark h5,.bg-dark h6{color:#fff}.dropdown-menu .dropdown-header{color:#868e96} diff --git a/css/font-awesome.min.css b/css/font-awesome.min.css new file mode 100644 index 0000000..540440c --- /dev/null +++ b/css/font-awesome.min.css @@ -0,0 +1,4 @@ +/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:'FontAwesome';src:url('../fonts/fontawesome-webfont.eot?v=4.7.0');src:url('../fonts/fontawesome-webfont.eot?#iefix&v=4.7.0') format('embedded-opentype'),url('../fonts/fontawesome-webfont.woff2?v=4.7.0') format('woff2'),url('../fonts/fontawesome-webfont.woff?v=4.7.0') format('woff'),url('../fonts/fontawesome-webfont.ttf?v=4.7.0') format('truetype'),url('../fonts/fontawesome-webfont.svg?v=4.7.0#fontawesomeregular') format('svg');font-weight:normal;font-style:normal}.fa{display:inline-block;font:normal normal normal 14px/1 
FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571429em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14285714em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14285714em;width:2.14285714em;top:.14285714em;text-align:center}.fa-li.fa-lg{left:-1.85714286em}.fa-border{padding:.2em .25em .15em;border:solid .08em #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left{margin-right:.3em}.fa.fa-pull-right{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left{margin-right:.3em}.fa.pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s infinite linear;animation:fa-spin 2s infinite linear}.fa-pulse{-webkit-animation:fa-spin 1s infinite steps(8);animation:fa-spin 1s infinite steps(8)}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scale(-1, 1);-ms-transform:scale(-1, 
1);transform:scale(-1, 1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scale(1, -1);-ms-transform:scale(1, -1);transform:scale(1, -1)}:root .fa-rotate-90,:root .fa-rotate-180,:root .fa-rotate-270,:root .fa-flip-horizontal,:root .fa-flip-vertical{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:"\f000"}.fa-music:before{content:"\f001"}.fa-search:before{content:"\f002"}.fa-envelope-o:before{content:"\f003"}.fa-heart:before{content:"\f004"}.fa-star:before{content:"\f005"}.fa-star-o:before{content:"\f006"}.fa-user:before{content:"\f007"}.fa-film:before{content:"\f008"}.fa-th-large:before{content:"\f009"}.fa-th:before{content:"\f00a"}.fa-th-list:before{content:"\f00b"}.fa-check:before{content:"\f00c"}.fa-remove:before,.fa-close:before,.fa-times:before{content:"\f00d"}.fa-search-plus:before{content:"\f00e"}.fa-search-minus:before{content:"\f010"}.fa-power-off:before{content:"\f011"}.fa-signal:before{content:"\f012"}.fa-gear:before,.fa-cog:before{content:"\f013"}.fa-trash-o:before{content:"\f014"}.fa-home:before{content:"\f015"}.fa-file-o:before{content:"\f016"}.fa-clock-o:before{content:"\f017"}.fa-road:before{content:"\f018"}.fa-download:before{content:"\f019"}.fa-arrow-circle-o-down:before{content:"\f01a"}.fa-arrow-circle-o-up:before{content:"\f01b"}.fa-inbox:before{content:"\f01c"}.fa-play-circle-o:before{content:"\f01d"}.fa-rotate-right:before,.fa-repeat:before{content:"\f01e"}.fa-refresh:before{content:"\f021"}.fa-list-alt:before{content:"\f022"}.fa-lock:before{content:"\f023"}.fa-flag:before{content:"\f024"}.fa-headphones:before{content:"\f025"}.fa-volume-off:before{content:"\f026"}.fa-volume-down:before{content:"\f027"}.fa-volume-up:before{c
ontent:"\f028"}.fa-qrcode:before{content:"\f029"}.fa-barcode:before{content:"\f02a"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-book:before{content:"\f02d"}.fa-bookmark:before{content:"\f02e"}.fa-print:before{content:"\f02f"}.fa-camera:before{content:"\f030"}.fa-font:before{content:"\f031"}.fa-bold:before{content:"\f032"}.fa-italic:before{content:"\f033"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-align-left:before{content:"\f036"}.fa-align-center:before{content:"\f037"}.fa-align-right:before{content:"\f038"}.fa-align-justify:before{content:"\f039"}.fa-list:before{content:"\f03a"}.fa-dedent:before,.fa-outdent:before{content:"\f03b"}.fa-indent:before{content:"\f03c"}.fa-video-camera:before{content:"\f03d"}.fa-photo:before,.fa-image:before,.fa-picture-o:before{content:"\f03e"}.fa-pencil:before{content:"\f040"}.fa-map-marker:before{content:"\f041"}.fa-adjust:before{content:"\f042"}.fa-tint:before{content:"\f043"}.fa-edit:before,.fa-pencil-square-o:before{content:"\f044"}.fa-share-square-o:before{content:"\f045"}.fa-check-square-o:before{content:"\f046"}.fa-arrows:before{content:"\f047"}.fa-step-backward:before{content:"\f048"}.fa-fast-backward:before{content:"\f049"}.fa-backward:before{content:"\f04a"}.fa-play:before{content:"\f04b"}.fa-pause:before{content:"\f04c"}.fa-stop:before{content:"\f04d"}.fa-forward:before{content:"\f04e"}.fa-fast-forward:before{content:"\f050"}.fa-step-forward:before{content:"\f051"}.fa-eject:before{content:"\f052"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-plus-circle:before{content:"\f055"}.fa-minus-circle:before{content:"\f056"}.fa-times-circle:before{content:"\f057"}.fa-check-circle:before{content:"\f058"}.fa-question-circle:before{content:"\f059"}.fa-info-circle:before{content:"\f05a"}.fa-crosshairs:before{content:"\f05b"}.fa-times-circle-o:before{content:"\f05c"}.fa-check-circle-o:before{content:"\f05d"}.fa-ban:before{content:"\f05e"}.f
a-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrow-down:before{content:"\f063"}.fa-mail-forward:before,.fa-share:before{content:"\f064"}.fa-expand:before{content:"\f065"}.fa-compress:before{content:"\f066"}.fa-plus:before{content:"\f067"}.fa-minus:before{content:"\f068"}.fa-asterisk:before{content:"\f069"}.fa-exclamation-circle:before{content:"\f06a"}.fa-gift:before{content:"\f06b"}.fa-leaf:before{content:"\f06c"}.fa-fire:before{content:"\f06d"}.fa-eye:before{content:"\f06e"}.fa-eye-slash:before{content:"\f070"}.fa-warning:before,.fa-exclamation-triangle:before{content:"\f071"}.fa-plane:before{content:"\f072"}.fa-calendar:before{content:"\f073"}.fa-random:before{content:"\f074"}.fa-comment:before{content:"\f075"}.fa-magnet:before{content:"\f076"}.fa-chevron-up:before{content:"\f077"}.fa-chevron-down:before{content:"\f078"}.fa-retweet:before{content:"\f079"}.fa-shopping-cart:before{content:"\f07a"}.fa-folder:before{content:"\f07b"}.fa-folder-open:before{content:"\f07c"}.fa-arrows-v:before{content:"\f07d"}.fa-arrows-h:before{content:"\f07e"}.fa-bar-chart-o:before,.fa-bar-chart:before{content:"\f080"}.fa-twitter-square:before{content:"\f081"}.fa-facebook-square:before{content:"\f082"}.fa-camera-retro:before{content:"\f083"}.fa-key:before{content:"\f084"}.fa-gears:before,.fa-cogs:before{content:"\f085"}.fa-comments:before{content:"\f086"}.fa-thumbs-o-up:before{content:"\f087"}.fa-thumbs-o-down:before{content:"\f088"}.fa-star-half:before{content:"\f089"}.fa-heart-o:before{content:"\f08a"}.fa-sign-out:before{content:"\f08b"}.fa-linkedin-square:before{content:"\f08c"}.fa-thumb-tack:before{content:"\f08d"}.fa-external-link:before{content:"\f08e"}.fa-sign-in:before{content:"\f090"}.fa-trophy:before{content:"\f091"}.fa-github-square:before{content:"\f092"}.fa-upload:before{content:"\f093"}.fa-lemon-o:before{content:"\f094"}.fa-phone:before{content:"\f095"}.fa-square-o:before{content:"\f096"}.fa-bookmark-o:b
efore{content:"\f097"}.fa-phone-square:before{content:"\f098"}.fa-twitter:before{content:"\f099"}.fa-facebook-f:before,.fa-facebook:before{content:"\f09a"}.fa-github:before{content:"\f09b"}.fa-unlock:before{content:"\f09c"}.fa-credit-card:before{content:"\f09d"}.fa-feed:before,.fa-rss:before{content:"\f09e"}.fa-hdd-o:before{content:"\f0a0"}.fa-bullhorn:before{content:"\f0a1"}.fa-bell:before{content:"\f0f3"}.fa-certificate:before{content:"\f0a3"}.fa-hand-o-right:before{content:"\f0a4"}.fa-hand-o-left:before{content:"\f0a5"}.fa-hand-o-up:before{content:"\f0a6"}.fa-hand-o-down:before{content:"\f0a7"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-globe:before{content:"\f0ac"}.fa-wrench:before{content:"\f0ad"}.fa-tasks:before{content:"\f0ae"}.fa-filter:before{content:"\f0b0"}.fa-briefcase:before{content:"\f0b1"}.fa-arrows-alt:before{content:"\f0b2"}.fa-group:before,.fa-users:before{content:"\f0c0"}.fa-chain:before,.fa-link:before{content:"\f0c1"}.fa-cloud:before{content:"\f0c2"}.fa-flask:before{content:"\f0c3"}.fa-cut:before,.fa-scissors:before{content:"\f0c4"}.fa-copy:before,.fa-files-o:before{content:"\f0c5"}.fa-paperclip:before{content:"\f0c6"}.fa-save:before,.fa-floppy-o:before{content:"\f0c7"}.fa-square:before{content:"\f0c8"}.fa-navicon:before,.fa-reorder:before,.fa-bars:before{content:"\f0c9"}.fa-list-ul:before{content:"\f0ca"}.fa-list-ol:before{content:"\f0cb"}.fa-strikethrough:before{content:"\f0cc"}.fa-underline:before{content:"\f0cd"}.fa-table:before{content:"\f0ce"}.fa-magic:before{content:"\f0d0"}.fa-truck:before{content:"\f0d1"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-square:before{content:"\f0d3"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-plus:before{content:"\f0d5"}.fa-money:before{content:"\f0d6"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-up:before{content:"\f0d8"}.fa-caret-left:before{conten
t:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-columns:before{content:"\f0db"}.fa-unsorted:before,.fa-sort:before{content:"\f0dc"}.fa-sort-down:before,.fa-sort-desc:before{content:"\f0dd"}.fa-sort-up:before,.fa-sort-asc:before{content:"\f0de"}.fa-envelope:before{content:"\f0e0"}.fa-linkedin:before{content:"\f0e1"}.fa-rotate-left:before,.fa-undo:before{content:"\f0e2"}.fa-legal:before,.fa-gavel:before{content:"\f0e3"}.fa-dashboard:before,.fa-tachometer:before{content:"\f0e4"}.fa-comment-o:before{content:"\f0e5"}.fa-comments-o:before{content:"\f0e6"}.fa-flash:before,.fa-bolt:before{content:"\f0e7"}.fa-sitemap:before{content:"\f0e8"}.fa-umbrella:before{content:"\f0e9"}.fa-paste:before,.fa-clipboard:before{content:"\f0ea"}.fa-lightbulb-o:before{content:"\f0eb"}.fa-exchange:before{content:"\f0ec"}.fa-cloud-download:before{content:"\f0ed"}.fa-cloud-upload:before{content:"\f0ee"}.fa-user-md:before{content:"\f0f0"}.fa-stethoscope:before{content:"\f0f1"}.fa-suitcase:before{content:"\f0f2"}.fa-bell-o:before{content:"\f0a2"}.fa-coffee:before{content:"\f0f4"}.fa-cutlery:before{content:"\f0f5"}.fa-file-text-o:before{content:"\f0f6"}.fa-building-o:before{content:"\f0f7"}.fa-hospital-o:before{content:"\f0f8"}.fa-ambulance:before{content:"\f0f9"}.fa-medkit:before{content:"\f0fa"}.fa-fighter-jet:before{content:"\f0fb"}.fa-beer:before{content:"\f0fc"}.fa-h-square:before{content:"\f0fd"}.fa-plus-square:before{content:"\f0fe"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angle-down:before{content:"\f107"}.fa-desktop:before{content:"\f108"}.fa-laptop:before{content:"\f109"}.fa-tablet:before{content:"\f10a"}.fa-mobile-phone:before,.fa-mobile:before{content:"\f10b"}.fa-circle-o:before{content:"\f10c"}.fa-quote-left:before{content:"\f10d"
}.fa-quote-right:before{content:"\f10e"}.fa-spinner:before{content:"\f110"}.fa-circle:before{content:"\f111"}.fa-mail-reply:before,.fa-reply:before{content:"\f112"}.fa-github-alt:before{content:"\f113"}.fa-folder-o:before{content:"\f114"}.fa-folder-open-o:before{content:"\f115"}.fa-smile-o:before{content:"\f118"}.fa-frown-o:before{content:"\f119"}.fa-meh-o:before{content:"\f11a"}.fa-gamepad:before{content:"\f11b"}.fa-keyboard-o:before{content:"\f11c"}.fa-flag-o:before{content:"\f11d"}.fa-flag-checkered:before{content:"\f11e"}.fa-terminal:before{content:"\f120"}.fa-code:before{content:"\f121"}.fa-mail-reply-all:before,.fa-reply-all:before{content:"\f122"}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:"\f123"}.fa-location-arrow:before{content:"\f124"}.fa-crop:before{content:"\f125"}.fa-code-fork:before{content:"\f126"}.fa-unlink:before,.fa-chain-broken:before{content:"\f127"}.fa-question:before{content:"\f128"}.fa-info:before{content:"\f129"}.fa-exclamation:before{content:"\f12a"}.fa-superscript:before{content:"\f12b"}.fa-subscript:before{content:"\f12c"}.fa-eraser:before{content:"\f12d"}.fa-puzzle-piece:before{content:"\f12e"}.fa-microphone:before{content:"\f130"}.fa-microphone-slash:before{content:"\f131"}.fa-shield:before{content:"\f132"}.fa-calendar-o:before{content:"\f133"}.fa-fire-extinguisher:before{content:"\f134"}.fa-rocket:before{content:"\f135"}.fa-maxcdn:before{content:"\f136"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-html5:before{content:"\f13b"}.fa-css3:before{content:"\f13c"}.fa-anchor:before{content:"\f13d"}.fa-unlock-alt:before{content:"\f13e"}.fa-bullseye:before{content:"\f140"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-rss-square:before{content:"\f143"}.fa-play-circle:before{content:"\f144"}.fa-ticket:before{content:"\f145"}.fa-mi
nus-square:before{content:"\f146"}.fa-minus-square-o:before{content:"\f147"}.fa-level-up:before{content:"\f148"}.fa-level-down:before{content:"\f149"}.fa-check-square:before{content:"\f14a"}.fa-pencil-square:before{content:"\f14b"}.fa-external-link-square:before{content:"\f14c"}.fa-share-square:before{content:"\f14d"}.fa-compass:before{content:"\f14e"}.fa-toggle-down:before,.fa-caret-square-o-down:before{content:"\f150"}.fa-toggle-up:before,.fa-caret-square-o-up:before{content:"\f151"}.fa-toggle-right:before,.fa-caret-square-o-right:before{content:"\f152"}.fa-euro:before,.fa-eur:before{content:"\f153"}.fa-gbp:before{content:"\f154"}.fa-dollar:before,.fa-usd:before{content:"\f155"}.fa-rupee:before,.fa-inr:before{content:"\f156"}.fa-cny:before,.fa-rmb:before,.fa-yen:before,.fa-jpy:before{content:"\f157"}.fa-ruble:before,.fa-rouble:before,.fa-rub:before{content:"\f158"}.fa-won:before,.fa-krw:before{content:"\f159"}.fa-bitcoin:before,.fa-btc:before{content:"\f15a"}.fa-file:before{content:"\f15b"}.fa-file-text:before{content:"\f15c"}.fa-sort-alpha-asc:before{content:"\f15d"}.fa-sort-alpha-desc:before{content:"\f15e"}.fa-sort-amount-asc:before{content:"\f160"}.fa-sort-amount-desc:before{content:"\f161"}.fa-sort-numeric-asc:before{content:"\f162"}.fa-sort-numeric-desc:before{content:"\f163"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbs-down:before{content:"\f165"}.fa-youtube-square:before{content:"\f166"}.fa-youtube:before{content:"\f167"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-youtube-play:before{content:"\f16a"}.fa-dropbox:before{content:"\f16b"}.fa-stack-overflow:before{content:"\f16c"}.fa-instagram:before{content:"\f16d"}.fa-flickr:before{content:"\f16e"}.fa-adn:before{content:"\f170"}.fa-bitbucket:before{content:"\f171"}.fa-bitbucket-square:before{content:"\f172"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-long-arrow-down:before{content:"\f175"}.fa-long-arrow-up:before{content:"\f176"}.fa-long-arr
ow-left:before{content:"\f177"}.fa-long-arrow-right:before{content:"\f178"}.fa-apple:before{content:"\f179"}.fa-windows:before{content:"\f17a"}.fa-android:before{content:"\f17b"}.fa-linux:before{content:"\f17c"}.fa-dribbble:before{content:"\f17d"}.fa-skype:before{content:"\f17e"}.fa-foursquare:before{content:"\f180"}.fa-trello:before{content:"\f181"}.fa-female:before{content:"\f182"}.fa-male:before{content:"\f183"}.fa-gittip:before,.fa-gratipay:before{content:"\f184"}.fa-sun-o:before{content:"\f185"}.fa-moon-o:before{content:"\f186"}.fa-archive:before{content:"\f187"}.fa-bug:before{content:"\f188"}.fa-vk:before{content:"\f189"}.fa-weibo:before{content:"\f18a"}.fa-renren:before{content:"\f18b"}.fa-pagelines:before{content:"\f18c"}.fa-stack-exchange:before{content:"\f18d"}.fa-arrow-circle-o-right:before{content:"\f18e"}.fa-arrow-circle-o-left:before{content:"\f190"}.fa-toggle-left:before,.fa-caret-square-o-left:before{content:"\f191"}.fa-dot-circle-o:before{content:"\f192"}.fa-wheelchair:before{content:"\f193"}.fa-vimeo-square:before{content:"\f194"}.fa-turkish-lira:before,.fa-try:before{content:"\f195"}.fa-plus-square-o:before{content:"\f196"}.fa-space-shuttle:before{content:"\f197"}.fa-slack:before{content:"\f198"}.fa-envelope-square:before{content:"\f199"}.fa-wordpress:before{content:"\f19a"}.fa-openid:before{content:"\f19b"}.fa-institution:before,.fa-bank:before,.fa-university:before{content:"\f19c"}.fa-mortar-board:before,.fa-graduation-cap:before{content:"\f19d"}.fa-yahoo:before{content:"\f19e"}.fa-google:before{content:"\f1a0"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-square:before{content:"\f1a2"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-stumbleupon:before{content:"\f1a4"}.fa-delicious:before{content:"\f1a5"}.fa-digg:before{content:"\f1a6"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-drupal:before{content:"\f1a9"}.fa-joomla:before{content:"\f1aa"}.fa-language:before{content:"\f1ab"}.fa-fax:before{content:"\f
1ac"}.fa-building:before{content:"\f1ad"}.fa-child:before{content:"\f1ae"}.fa-paw:before{content:"\f1b0"}.fa-spoon:before{content:"\f1b1"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-recycle:before{content:"\f1b8"}.fa-automobile:before,.fa-car:before{content:"\f1b9"}.fa-cab:before,.fa-taxi:before{content:"\f1ba"}.fa-tree:before{content:"\f1bb"}.fa-spotify:before{content:"\f1bc"}.fa-deviantart:before{content:"\f1bd"}.fa-soundcloud:before{content:"\f1be"}.fa-database:before{content:"\f1c0"}.fa-file-pdf-o:before{content:"\f1c1"}.fa-file-word-o:before{content:"\f1c2"}.fa-file-excel-o:before{content:"\f1c3"}.fa-file-powerpoint-o:before{content:"\f1c4"}.fa-file-photo-o:before,.fa-file-picture-o:before,.fa-file-image-o:before{content:"\f1c5"}.fa-file-zip-o:before,.fa-file-archive-o:before{content:"\f1c6"}.fa-file-sound-o:before,.fa-file-audio-o:before{content:"\f1c7"}.fa-file-movie-o:before,.fa-file-video-o:before{content:"\f1c8"}.fa-file-code-o:before{content:"\f1c9"}.fa-vine:before{content:"\f1ca"}.fa-codepen:before{content:"\f1cb"}.fa-jsfiddle:before{content:"\f1cc"}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-saver:before,.fa-support:before,.fa-life-ring:before{content:"\f1cd"}.fa-circle-o-notch:before{content:"\f1ce"}.fa-ra:before,.fa-resistance:before,.fa-rebel:before{content:"\f1d0"}.fa-ge:before,.fa-empire:before{content:"\f1d1"}.fa-git-square:before{content:"\f1d2"}.fa-git:before{content:"\f1d3"}.fa-y-combinator-square:before,.fa-yc-square:before,.fa-hacker-news:before{content:"\f1d4"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-qq:before{content:"\f1d6"}.fa-wechat:before,.fa-weixin:before{content:"\f1d7"}.fa-send:before,.fa-paper-plane:before{content:"\f1d8"}.fa-send-o:before,.fa-paper-plane-o:before{content:"\f1d9"}.fa-history:before{content:"\f1da"}.fa-circle-thin:before{content:"\f1d
b"}.fa-header:before{content:"\f1dc"}.fa-paragraph:before{content:"\f1dd"}.fa-sliders:before{content:"\f1de"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-bomb:before{content:"\f1e2"}.fa-soccer-ball-o:before,.fa-futbol-o:before{content:"\f1e3"}.fa-tty:before{content:"\f1e4"}.fa-binoculars:before{content:"\f1e5"}.fa-plug:before{content:"\f1e6"}.fa-slideshare:before{content:"\f1e7"}.fa-twitch:before{content:"\f1e8"}.fa-yelp:before{content:"\f1e9"}.fa-newspaper-o:before{content:"\f1ea"}.fa-wifi:before{content:"\f1eb"}.fa-calculator:before{content:"\f1ec"}.fa-paypal:before{content:"\f1ed"}.fa-google-wallet:before{content:"\f1ee"}.fa-cc-visa:before{content:"\f1f0"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-bell-slash:before{content:"\f1f6"}.fa-bell-slash-o:before{content:"\f1f7"}.fa-trash:before{content:"\f1f8"}.fa-copyright:before{content:"\f1f9"}.fa-at:before{content:"\f1fa"}.fa-eyedropper:before{content:"\f1fb"}.fa-paint-brush:before{content:"\f1fc"}.fa-birthday-cake:before{content:"\f1fd"}.fa-area-chart:before{content:"\f1fe"}.fa-pie-chart:before{content:"\f200"}.fa-line-chart:before{content:"\f201"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-bicycle:before{content:"\f206"}.fa-bus:before{content:"\f207"}.fa-ioxhost:before{content:"\f208"}.fa-angellist:before{content:"\f209"}.fa-cc:before{content:"\f20a"}.fa-shekel:before,.fa-sheqel:before,.fa-ils:before{content:"\f20b"}.fa-meanpath:before{content:"\f20c"}.fa-buysellads:before{content:"\f20d"}.fa-connectdevelop:before{content:"\f20e"}.fa-dashcube:before{content:"\f210"}.fa-forumbee:before{content:"\f211"}.fa-leanpub:before{content:"\f212"}.fa-sellsy:before{content:"\f213"}.fa-shirtsinbulk:before{content:"\f214"}.fa-simplybui
lt:before{content:"\f215"}.fa-skyatlas:before{content:"\f216"}.fa-cart-plus:before{content:"\f217"}.fa-cart-arrow-down:before{content:"\f218"}.fa-diamond:before{content:"\f219"}.fa-ship:before{content:"\f21a"}.fa-user-secret:before{content:"\f21b"}.fa-motorcycle:before{content:"\f21c"}.fa-street-view:before{content:"\f21d"}.fa-heartbeat:before{content:"\f21e"}.fa-venus:before{content:"\f221"}.fa-mars:before{content:"\f222"}.fa-mercury:before{content:"\f223"}.fa-intersex:before,.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-venus-double:before{content:"\f226"}.fa-mars-double:before{content:"\f227"}.fa-venus-mars:before{content:"\f228"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-neuter:before{content:"\f22c"}.fa-genderless:before{content:"\f22d"}.fa-facebook-official:before{content:"\f230"}.fa-pinterest-p:before{content:"\f231"}.fa-whatsapp:before{content:"\f232"}.fa-server:before{content:"\f233"}.fa-user-plus:before{content:"\f234"}.fa-user-times:before{content:"\f235"}.fa-hotel:before,.fa-bed:before{content:"\f236"}.fa-viacoin:before{content:"\f237"}.fa-train:before{content:"\f238"}.fa-subway:before{content:"\f239"}.fa-medium:before{content:"\f23a"}.fa-yc:before,.fa-y-combinator:before{content:"\f23b"}.fa-optin-monster:before{content:"\f23c"}.fa-opencart:before{content:"\f23d"}.fa-expeditedssl:before{content:"\f23e"}.fa-battery-4:before,.fa-battery:before,.fa-battery-full:before{content:"\f240"}.fa-battery-3:before,.fa-battery-three-quarters:before{content:"\f241"}.fa-battery-2:before,.fa-battery-half:before{content:"\f242"}.fa-battery-1:before,.fa-battery-quarter:before{content:"\f243"}.fa-battery-0:before,.fa-battery-empty:before{content:"\f244"}.fa-mouse-pointer:before{content:"\f245"}.fa-i-cursor:before{content:"\f246"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-sticky-note:before{content:"\f249"}.fa-st
icky-note-o:before{content:"\f24a"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-diners-club:before{content:"\f24c"}.fa-clone:before{content:"\f24d"}.fa-balance-scale:before{content:"\f24e"}.fa-hourglass-o:before{content:"\f250"}.fa-hourglass-1:before,.fa-hourglass-start:before{content:"\f251"}.fa-hourglass-2:before,.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-3:before,.fa-hourglass-end:before{content:"\f253"}.fa-hourglass:before{content:"\f254"}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:"\f255"}.fa-hand-stop-o:before,.fa-hand-paper-o:before{content:"\f256"}.fa-hand-scissors-o:before{content:"\f257"}.fa-hand-lizard-o:before{content:"\f258"}.fa-hand-spock-o:before{content:"\f259"}.fa-hand-pointer-o:before{content:"\f25a"}.fa-hand-peace-o:before{content:"\f25b"}.fa-trademark:before{content:"\f25c"}.fa-registered:before{content:"\f25d"}.fa-creative-commons:before{content:"\f25e"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-tripadvisor:before{content:"\f262"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-get-pocket:before{content:"\f265"}.fa-wikipedia-w:before{content:"\f266"}.fa-safari:before{content:"\f267"}.fa-chrome:before{content:"\f268"}.fa-firefox:before{content:"\f269"}.fa-opera:before{content:"\f26a"}.fa-internet-explorer:before{content:"\f26b"}.fa-tv:before,.fa-television:before{content:"\f26c"}.fa-contao:before{content:"\f26d"}.fa-500px:before{content:"\f26e"}.fa-amazon:before{content:"\f270"}.fa-calendar-plus-o:before{content:"\f271"}.fa-calendar-minus-o:before{content:"\f272"}.fa-calendar-times-o:before{content:"\f273"}.fa-calendar-check-o:before{content:"\f274"}.fa-industry:before{content:"\f275"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-map-o:before{content:"\f278"}.fa-map:before{content:"\f279"}.fa-commenting:before{content:"\f27a"}.fa-commenting-o:before{content:"\f27b"}.fa-houzz:before{content:"\f27c"}.fa-vimeo:before{content:"\f27
d"}.fa-black-tie:before{content:"\f27e"}.fa-fonticons:before{content:"\f280"}.fa-reddit-alien:before{content:"\f281"}.fa-edge:before{content:"\f282"}.fa-credit-card-alt:before{content:"\f283"}.fa-codiepie:before{content:"\f284"}.fa-modx:before{content:"\f285"}.fa-fort-awesome:before{content:"\f286"}.fa-usb:before{content:"\f287"}.fa-product-hunt:before{content:"\f288"}.fa-mixcloud:before{content:"\f289"}.fa-scribd:before{content:"\f28a"}.fa-pause-circle:before{content:"\f28b"}.fa-pause-circle-o:before{content:"\f28c"}.fa-stop-circle:before{content:"\f28d"}.fa-stop-circle-o:before{content:"\f28e"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-hashtag:before{content:"\f292"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-percent:before{content:"\f295"}.fa-gitlab:before{content:"\f296"}.fa-wpbeginner:before{content:"\f297"}.fa-wpforms:before{content:"\f298"}.fa-envira:before{content:"\f299"}.fa-universal-access:before{content:"\f29a"}.fa-wheelchair-alt:before{content:"\f29b"}.fa-question-circle-o:before{content:"\f29c"}.fa-blind:before{content:"\f29d"}.fa-audio-description:before{content:"\f29e"}.fa-volume-control-phone:before{content:"\f2a0"}.fa-braille:before{content:"\f2a1"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asl-interpreting:before,.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-deafness:before,.fa-hard-of-hearing:before,.fa-deaf:before{content:"\f2a4"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-signing:before,.fa-sign-language:before{content:"\f2a7"}.fa-low-vision:before{content:"\f2a8"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-pied-piper:before{content:"\f2ae"}.fa-first-order:before{content:"\f2b0"}.fa-yoast:before{content:"\f2b1"}.fa-themeisle:before{content:"\f2b2"}.
fa-google-plus-circle:before,.fa-google-plus-official:before{content:"\f2b3"}.fa-fa:before,.fa-font-awesome:before{content:"\f2b4"}.fa-handshake-o:before{content:"\f2b5"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-o:before{content:"\f2b7"}.fa-linode:before{content:"\f2b8"}.fa-address-book:before{content:"\f2b9"}.fa-address-book-o:before{content:"\f2ba"}.fa-vcard:before,.fa-address-card:before{content:"\f2bb"}.fa-vcard-o:before,.fa-address-card-o:before{content:"\f2bc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-circle-o:before{content:"\f2be"}.fa-user-o:before{content:"\f2c0"}.fa-id-badge:before{content:"\f2c1"}.fa-drivers-license:before,.fa-id-card:before{content:"\f2c2"}.fa-drivers-license-o:before,.fa-id-card-o:before{content:"\f2c3"}.fa-quora:before{content:"\f2c4"}.fa-free-code-camp:before{content:"\f2c5"}.fa-telegram:before{content:"\f2c6"}.fa-thermometer-4:before,.fa-thermometer:before,.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-thermometer-2:before,.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:"\f2cb"}.fa-shower:before{content:"\f2cc"}.fa-bathtub:before,.fa-s15:before,.fa-bath:before{content:"\f2cd"}.fa-podcast:before{content:"\f2ce"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-times-rectangle:before,.fa-window-close:before{content:"\f2d3"}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:"\f2d4"}.fa-bandcamp:before{content:"\f2d5"}.fa-grav:before{content:"\f2d6"}.fa-etsy:before{content:"\f2d7"}.fa-imdb:before{content:"\f2d8"}.fa-ravelry:before{content:"\f2d9"}.fa-eercast:before{content:"\f2da"}.fa-microchip:before{content:"\f2db"}.fa-snowflake-o:before{content:"\f2dc"}.fa-superpowers:before{content:"\f2dd"}.fa-wpexplorer:before{c
ontent:"\f2de"}.fa-meetup:before{content:"\f2e0"}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0, 0, 0, 0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto} diff --git a/details/best-pratice-collect/index.html b/details/best-pratice-collect/index.html new file mode 100644 index 0000000..885ab75 --- /dev/null +++ b/details/best-pratice-collect/index.html @@ -0,0 +1,400 @@ + + + + + + + + + + + Bringing too much data back to the driver (collect and friends) - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Bringing too much data back to the driver (collect and friends)

+

A common anti-pattern in Apache Spark is using collect() and then processing records on the driver. There are a few different reasons why folks tend to do this and we can work through some alternatives:

+
    +
  • Label items in ascending order
      +
    • ZipWithIndex
    • +
    +
  • +
  • Index items in order
      +
    • Compute the size of each partition and use this to assign indexes.
    • +
    +
  • +
  • In order processing
      +
    • Compute a partition at a time (this is annoying to do, sorry).
    • +
    +
  • +
  • Writing out to a format not supported by Spark
      +
    • Use foreachPartition or implement your own DataSink.
    • +
    +
  • +
  • Need to aggregate everything into a single record
      +
    • Call reduce or treeReduce
    • +
    +
  • +
+

Sometimes you do really need to bring the data back to the driver for some reason (e.g., updating model weights). In those cases, especially if you process the data sequentially, you can limit the amount of data coming back to the driver at one time. toLocalIterator gives you back an iterator which will only need to fetch a partition at a time (although in Python this may be pipelined for efficiency). By default toLocalIterator will launch a Spark job for each partition, so if you know you will eventually need all of the data it makes sense to do a persist + a count (async or otherwise) so you don't block as long between partitions.

+

This doesn't mean every call to collect() is bad, if the amount of data being returned is under ~1gb it's probably OK although it will limit parallelism.

+
+
+ + + + + + + + + + + + diff --git a/details/big-broadcast-join/index.html b/details/big-broadcast-join/index.html new file mode 100644 index 0000000..0544a0e --- /dev/null +++ b/details/big-broadcast-join/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Too big broadcast joins - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Too big broadcast joins

+

Beware that broadcast joins put unnecessary pressure on the driver. Before the tables are broadcasted to all the executors, the data is brought back to the driver and then broadcasted to executors. So you might run into driver OOMs.

+

Broadcast smaller tables but this is usually recommended for < 10 Mb tables. Although that is mostly the default, we can comfortably broadcast much larger datasets as long as they fit in the executor and driver memories. Remember if there are multiple broadcast joins in the same stage, you need to have enough room for all those datasets in memory. +You can configure the broadcast threshold using spark.sql.autoBroadcastJoinThreshold or increase the driver memory by setting spark.driver.memory to a higher value.

+

Make sure that you need more memory on your driver than the sum of all your broadcasted data in any stage plus all the other overheads that the driver deals with!

+
+
+ + + + + + + + + + + + diff --git a/details/broadcast-with-disable/index.html b/details/broadcast-with-disable/index.html new file mode 100644 index 0000000..6785a10 --- /dev/null +++ b/details/broadcast-with-disable/index.html @@ -0,0 +1,398 @@ + + + + + + + + + + + Tables getting broadcasted even when broadcast is disabled - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Tables getting broadcasted even when broadcast is disabled

+

You expect the broadcast to stop after you disable the broadcast threshold, by setting spark.sql.autoBroadcastJoinThreshold to -1, but Spark tries to broadcast the bigger table and fails with a broadcast error. And you observe that the query plan has BroadcastNestedLoopJoin in the physical plan.

+
    +
  • Check for sub queries in your code using NOT IN
  • +
+

Example :

+
select * from TableA where id not in (select id from TableB)
+
+

This typically results in a forced BroadcastNestedLoopJoin even when the broadcast setting is disabled. +If the data being processed is large enough, this results in broadcast errors when Spark attempts to broadcast the table

+
    +
  • Rewrite query using not exists or a regular LEFT JOIN instead of not in
  • +
+

Example:

+
select * from TableA where not exists (select 1 from TableB where TableA.id = TableB.id)
+
+

The query will use SortMergeJoin and will resolve any Driver memory errors because of forced broadcasts

+

Relevant links

+

External Resource

+
+
+ + + + + + + + + + + + diff --git a/details/class-or-method-not-found/index.html b/details/class-or-method-not-found/index.html new file mode 100644 index 0000000..b052635 --- /dev/null +++ b/details/class-or-method-not-found/index.html @@ -0,0 +1,399 @@ + + + + + + + + + + + Class or method not found - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Class or method not found

+

When your compile-time class path differs from the runtime class path, you may encounter errors that signal that a class or method could not be found (e.g., NoClassDefFoundError, NoSuchMethodError).

+
java.lang.NoSuchMethodError: com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.subTypeValidator(Lcom/fasterxml/jackson/databind/cfg/MapperConfig;)Lcom/fasterxml/jackson/databind/jsontype/PolymorphicTypeValidator;
+    at com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.buildTypeDeserializer(AvroTypeResolverBuilder.java:43)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findTypeDeserializer(BasicDeserializerFactory.java:1598)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findPropertyContentTypeDeserializer(BasicDeserializerFactory.java:1766)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.resolveMemberAndTypeAnnotations(BasicDeserializerFactory.java:2092)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.constructCreatorProperty(BasicDeserializerFactory.java:1069)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addExplicitPropertyCreator(BasicDeserializerFactory.java:703)
+    at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addDeserializerConstructors(BasicDeserializerFactory.java:476)
+    ...
+

This may be due to packaging a fat JAR with dependency versions that are in conflict with those provided by the Spark environment. When there are multiple versions of the same library in the runtime class path under the same package, Java's class loader hierarchy kicks in, which can lead to unintended behaviors.

+

There are a few options to get around this.

+
    +
  1. Identify the version of the problematic library within your Spark environment and pin the dependency to that version in your build file. To identify the version used in your Spark environment, in the Spark UI go to the Environment tab, scroll down to Classpath Entries, and find the corresponding library.
  2. +
  3. Exclude the transient dependency of the problematic library from imported libraries in your build file.
  4. +
  5. Shade the problematic library under a different package.
  6. +
+

If options (1) and (2) result in more dependency conflicts, it may be that the version of the problematic library in the Spark environment is incompatible with your application code. Therefore, it makes sense to shade the problematic library so that your application can run with a version of the library isolated from the rest of the Spark environment.

+

If you are using the shadow plugin in Gradle, you can shade using: +

shadowJar {
+    ...
+    relocate 'com.fasterxml.jackson', 'shaded.fasterxml.jackson'
+}
+In this example, Jackson libraries used by your application will be available in the shaded.fasterxml.jackson package at runtime.

+
+
+ + + + + + + + + + + + diff --git a/details/container-oom/index.html b/details/container-oom/index.html new file mode 100644 index 0000000..a8f1688 --- /dev/null +++ b/details/container-oom/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + Container OOMs - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Container OOMs

+

Container OOMs can be difficult to debug as the container running the problematic code is killed, and sometimes not all of the log information is available.

+

Non-JVM language users (such as Python) are most likely to encounter issues with container OOMs. This is because the JVM is generally configured to not use more memory than the container it is running in.

+

Everything which isn't inside the JVM is considered "overhead", so Tensorflow, Python, bash, etc. A first step with a container OOM is often increasing spark.executor.memoryOverhead and spark.driver.memoryOverhead to leave more memory for non-Java processes.

+

Python users can set spark.executor.pyspark.memory to limit the Python VM to a certain amount of memory. This amount of memory is then added to the overhead.

+

Python users performing aggregations in Python should also check out the PyUDFOOM page.

+
+
+ + + + + + + + + + + + diff --git a/details/correlated-column-not-allowed/index.html b/details/correlated-column-not-allowed/index.html new file mode 100644 index 0000000..bf9dd85 --- /dev/null +++ b/details/correlated-column-not-allowed/index.html @@ -0,0 +1,413 @@ + + + + + + + + + + + spark.sql.AnalysisException: Correlated column is not allowed in predicate - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

spark.sql.AnalysisException: Correlated column is not allowed in predicate

+

SPARK-35080 introduces a check for correlated subqueries with aggregates which may have previously returned incorrect results. +Instead, starting in Spark 2.4.8, these queries will raise an org.apache.spark.sql.AnalysisException exception.

+

One of the examples of this (from the JIRA) is:

+
create or replace view t1(c) as values ('a'), ('b');
+create or replace view t2(c) as values ('ab'), ('abc'), ('bc');
+
+select c, (select count(*) from t2 where t1.c = substring(t2.c, 1, 1)) from t1;
+

Instead you should do an explicit join and then perform your aggregation:

+
create or replace view t1(c) as values ('a'), ('b');
+create or replace view t2(c) as values ('ab'), ('abc'), ('bc');
+
+create or replace view t3 as select t1.c from t2 INNER JOIN t1 ON t1.c = substring(t2.c, 1, 1);
+
+select c, count(*) from t3 group by c;
+

Similarly:

+
create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3);
+create or replace view t2(c) as values (6);
+
+select c, (select count(*) from t1 where a + b = c) from t2;
+

Can be rewritten as:

+
create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3);
+create or replace view t2(c) as values (6);
+
+create or replace view t3 as select t2.c from t2 INNER JOIN t1 ON t2.c = t1.a + t1.b;
+
+select c, count(*) from t3 group by c;
+

Likewise in Scala and Python use an explicit .join and then perform your aggregation on the joined result. +Now Spark can compute correct results thus avoiding the exception.

+

Relevant links:

+
+
+
+ + + + + + + + + + + + diff --git a/details/driver-max-result-size/index.html b/details/driver-max-result-size/index.html new file mode 100644 index 0000000..e637a66 --- /dev/null +++ b/details/driver-max-result-size/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + Result size larger than spark.driver.maxResultSize error OR Kryo serialization failed: Buffer overflow. - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Result size larger than spark.driver.maxResultSize error OR Kryo serialization failed: Buffer overflow.

+

ex: Key-Skew-Spark-UI

+

You typically run into this error for one of the following reasons.

+
    +
  1. You are sending a large result set to the driver using SELECT(in SQL) or COLLECT(in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high number of rows. Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters.
  2. +
  3. You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So if you are broadcasting a table that is larger than spark.driver.maxResultSize, you will run into this error. You can overcome this by either increasing the spark.driver.maxResultSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join.
  4. +
  5. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. + a. You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultSize. The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a subsequent stage or non-uniform output files etc..). If using the latter option, be sure spark.driver.maxResultSize is less than spark.driver.memory. + b. You have too many Spark partitions from the previous stage: In this case, you have a large number of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the number of rows(partitions*sampleSize) could be larger than spark.driver.maxResultSize. A recommended way to resolve this issue is by combining the splits for the table(increase spark.(path).(db).(table).target-size) with high map tasks. Note that having a large number of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions.
  6. +
+

External resources: +- Apache Spark job fails with maxResultSize exception

+
+
+ + + + + + + + + + + + diff --git a/details/error-driver-max-result-size/index.html b/details/error-driver-max-result-size/index.html new file mode 100644 index 0000000..bfb4f5c --- /dev/null +++ b/details/error-driver-max-result-size/index.html @@ -0,0 +1,392 @@ + + + + + + + + + + + Result size larger than spark.driver.maxResultsSize error - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Result size larger than spark.driver.maxResultSize error

+

ex: spark-driver-maxResultsSize-Error

+

You typically run into this error for one of the following reasons.

+
    +
  1. You are sending a large result set to the driver using SELECT(in SQL) or COLLECT(in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high number of rows. Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters. (Collect best practices )
  2. +
  3. You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So if you are broadcasting a table that is larger than spark.driver.maxResultSize, you will run into this error. You can overcome this by either increasing the spark.driver.maxResultSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join. Note that Spark broadcasts a table referenced in a join if the size of the table is less than spark.sql.autoBroadcastJoinThreshold(100 MB by default at Netflix). You can change this config to include larger tables in broadcast or reduce the threshold if you want to exclude certain tables. You can also set this to -1 if you want to disable broadcast joins.
  4. +
  5. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. + a. You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultSize. The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a subsequent stage or non-uniform output files etc.) + b. You have too many Spark partitions from the previous stage: In this case, you have a large number of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the number of rows(partitions*sampleSize) could be larger than spark.driver.maxResultSize. A recommended way to resolve this issue is by combining the splits for the table(increase spark.netflix.(db).(table).target-size) with high map tasks. Note that having a large number of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions.
  6. +
+

Broadcast join related articles

+

SQL Broadcast Join Hints

+

Tables getting broadcasted even when broadcast is disabled

+
+
+ + + + + + + + + + + + diff --git a/details/error-driver-out-of-memory/index.html b/details/error-driver-out-of-memory/index.html new file mode 100644 index 0000000..b57b3da --- /dev/null +++ b/details/error-driver-out-of-memory/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Driver ran out of memory - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Driver ran out of memory

+

If you see java.lang.OutOfMemoryError: in the driver log/stderr, it is most likely from the driver JVM running out of memory. This article has the memory config for increasing the driver memory. One reason you could run into this error is +if you are reading from a table with too many splits(s3 files) and overwhelming the driver with a lot of metadata.

+

Another cause for driver out of memory errors is when the number of partitions is too high and you trigger a sort or shuffle where Spark samples the data, but then runs out of memory while collecting the sample. To solve this, repartition to a lower number of partitions or if you're in RDDs coalesce is a more efficient option (in DataFrames coalesce can have an impact upstream in the query plan).

+

A less common, but still semi-frequent, occurrence of driver out of memory is an excessive number of tasks in the UI. This can be controlled by reducing spark.ui.retainedTasks (default 100k).

+
+
+ + + + + + + + + + + + diff --git a/details/error-driver-stack-overflow/index.html b/details/error-driver-stack-overflow/index.html new file mode 100644 index 0000000..eb76cf6 --- /dev/null +++ b/details/error-driver-stack-overflow/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Driver ran out of memory - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Driver ran out of stack memory (stack overflow)

+

Note that it is very rare to run into this error. You may see this error when you are using too many filters(in your sql/dataframe/dataset). The workaround is to increase the Spark driver JVM stack size by setting the below config to something higher than the default

+
    +
  • spark.driver.extraJavaOptions: "-Xss512M" #Sets the stack size to 512 MB
  • +
+
+
+ + + + + + + + + + + + diff --git a/details/error-executor-out-of-disk/index.html b/details/error-executor-out-of-disk/index.html new file mode 100644 index 0000000..2fc8412 --- /dev/null +++ b/details/error-executor-out-of-disk/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + Executor out of disk error - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Executor out of disk error

+

By far the most common cause of executor out of disk errors is a mis-configuration of Spark's temporary directories.

+

You should set spark.local.dir to a directory with lots of local storage available. If you are on YARN this will be overridden by the LOCAL_DIRS environment variable on the workers.

+

Kubernetes users may wish to add a large emptyDir for Spark to use for temporary storage.

+

Another common cause is having no longer needed/used RDDs/DataFrames/Datasets in scope. This tends to happen more often with notebooks as more things are placed in the global scope where they are not automatically cleaned up. A solution to this is breaking your code into more functions so that things go out of scope, or explicitly setting no longer needed RDDs/DataFrames/Datasets to None/null.

+

On the other hand, if you have an iterative algorithm you should investigate whether you may have too big of a DAG.

+
+
+ + + + + + + + + + + + diff --git a/details/error-executor-out-of-memory/index.html b/details/error-executor-out-of-memory/index.html new file mode 100644 index 0000000..ff12668 --- /dev/null +++ b/details/error-executor-out-of-memory/index.html @@ -0,0 +1,383 @@ + + + + + + + + + + + Executor ran out of memory - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Executor ran out of memory

+

Executor out of memory issues can come from many sources. To narrow down what the cause of the error there are a few important places to look: the Spark Web UI, the executor log, the driver log, and (if applicable) the cluster manager (e.g. YARN/K8s) log/UI.

+

Container OOM

+

If the driver log indicates Container killed by YARN for exceeding memory limits for the applicable executor, or if (on K8s) the Spark UI show's the reason for the executor loss as "OOMKill" / exit code 137 then it's likely your program is exceeding the amount of memory assigned to it. This doesn't normally happen with pure JVM code, but instead when calling PySpark or JNI libraries (or using off-heap storage).

+

PySpark users are the most likely to encounter container OOMs. If you have a PySpark UDF in the stage you should check out Python UDF OOM to eliminate that potential cause. Another potential issue to investigate is if you have key skew as trying to load too large a partition in Python can result in an OOM. If you are using a library, like Tensorflow, which results in

+
+
+ + + + + + + + + + + + diff --git a/details/error-invalid-file/index.html b/details/error-invalid-file/index.html new file mode 100644 index 0000000..fed447b --- /dev/null +++ b/details/error-invalid-file/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Missing Files / File Not Found / Reading past RLE/BitPacking stream - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Missing Files / File Not Found / Reading past RLE/BitPacking stream

+

Missing files are a relatively rare error in Spark. Most commonly they are caused by non-atomic operations in the data writer and will go away when you re-run your query/job.

+

On the other hand Reading past RLE/BitPacking stream or other file read errors tend to be non-transient. +If the error is not transient it may mean that the metadata store (e.g. hive or iceberg) is pointing to a file that does not exist or has a bad format. You can clean up Iceberg tables using Iceberg Table Cleanup from holden's spark-misc-utils, but be careful and talk with whoever produced the table to make sure that it's ok.

+

If you get a failed to read parquet file while you are not trying to read a parquet file, it's likely that you are using the wrong metastore.

+
+
+ + + + + + + + + + + + diff --git a/details/error-job/index.html b/details/error-job/index.html new file mode 100644 index 0000000..f059023 --- /dev/null +++ b/details/error-job/index.html @@ -0,0 +1,382 @@ + + + + + + + + + + + Error - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Error

+

Most of the errors should fall into below 4 categories. Drill-down to individual sections to isolate your error/exception.

+
    +
  1. SQL Analysis Exception
  2. +
  3. Memory Error
  4. +
  5. Shuffle Error
  6. +
  7. Other Error
  8. +
+
+
+ + + + + + + + + + + + diff --git a/details/error-memory/index.html b/details/error-memory/index.html new file mode 100644 index 0000000..1206a7d --- /dev/null +++ b/details/error-memory/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + Memory Errors - Spark Advanced Topics + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + + + + + + diff --git a/details/error-other/index.html b/details/error-other/index.html new file mode 100644 index 0000000..05dc336 --- /dev/null +++ b/details/error-other/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + Other errors - Spark Advanced Topics + + + + + + + + + + + +
+
+
+ +
+
+ + + + + + + + + + + + diff --git a/details/error-shuffle/index.html b/details/error-shuffle/index.html new file mode 100644 index 0000000..d8f61b0 --- /dev/null +++ b/details/error-shuffle/index.html @@ -0,0 +1,426 @@ + + + + + + + + + + + Fetch Failed exceptions - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Fetch Failed exceptions

+

No time to read, help me now.

+

FetchFailed exceptions are mainly due to misconfiguration of spark.sql.shuffle.partitions:

+
    +
  1. Too few shuffle partitions: Having too few shuffle partitions means you could have a shuffle block that is larger than the limit(Integer.MaxValue=~2GB) or OOM(Exit code 143). The symptom for this can also be long-running tasks where the blocks are large but not reached the limit. A quick fix is to increase the shuffle/reducer parallelism by increasing spark.sql.shuffle.partitions(default is 500).
  2. +
  3. Too many shuffle partitions: Too many shuffle partitions could put a stress on the shuffle service and could run into errors like network timeout. Note that the shuffle service is a shared service for all the jobs running on the cluster so it is possible that someone else's job with high shuffle activity could cause errors for your job. It is worth checking to see if there is a pattern of these failures for your job to confirm if it is an issue with your job or not. Also note that the higher the shuffle partitions, the more likely that you would see this issue.
  4. +
+

Tell me more.

+

FetchFailed Exceptions can be bucketed into below 4 categories:

+
    +
  1. Ran out of heap memory(OOM) on an Executor
  2. +
  3. Ran out of overhead memory on an Executor
  4. +
  5. Shuffle block greater than 2 GB
  6. +
  7. Network TimeOut.
  8. +
+

Ran out of heap memory(OOM) on an Executor

+

This error indicates that the executor hosting the shuffle block has crashed due to Java OOM. The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions. A workaround is to increase the shuffle partitions. Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. Please refer to key-skew for related workarounds.

+

Errors that you normally see in the executor/task logs:

+
    +
  • ExecutorLostFailure due to Exit code 143
  • +
  • ExecutorLostFailure due to Executor Heartbeat timed out.
  • +
+

Ran out of overhead memory on an Executor

+

This error indicates that the executor hosting the shuffle block has crashed due to off-heap(overhead) memory. Increasing spark.yarn.executor.memoryOverhead should prevent this specific exception.

+

Error that you normally see in the executor/task logs:

+
    +
  • ExecutorLostFailure, # GB of # GB physical memory used. Consider boosting the spark.yarn.executor.memoryOverhead
  • +
+

Shuffle block greater than 2 GB

+

The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions. A workaround is to increase the shuffle partitions(increases the number of blocks and reduces the block size). Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. Please refer to key-skew for related workarounds.

+

Error that you normally see in the executor/task logs:

+
    +
  • Too Large Frame
  • +
  • Frame size exceeding
  • +
  • size exceeding Integer.MaxValue(~2GB)
  • +
+

Network Timeout

+

The most likely cause for this exception is high shuffle activity(high network load) in your job. Reducing the shuffle partitions spark.sql.shuffle.partitions would mitigate this issue. You can also reduce the network load by modifying the shuffle config. (todo: add details)

+

Error that you normally see in the executor/task logs:

+
    +
  • org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 0
  • +
  • org.apache.spark.shuffle.FetchFailedException: Failed to connect to ip-xxxxxxxx
  • +
  • Caused by: org.apache.spark.shuffle.FetchFailedException: Too large frame: xxxxxxxxxxx
  • +
+
+
+ + + + + + + + + + + + diff --git a/details/error-sql-analysis/index.html b/details/error-sql-analysis/index.html new file mode 100644 index 0000000..182bed8 --- /dev/null +++ b/details/error-sql-analysis/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + spark.sql.AnalysisException - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

spark.sql.AnalysisException

+

Spark SQL AnalysisException covers a wide variety of potential issues, ranging from ambiguous columns to more esoteric items like subquery issues. A good first step is making sure that your SQL is valid and your brackets are where you intend by putting your query through a SQL pretty-printer. After that hopefully the details of the AnalysisException error will guide you to one of the sub-nodes in the error graph.

+

Known issues

+
+
+
+ + + + + + + + + + + + diff --git a/details/even_partitioning_still_slow/index.html b/details/even_partitioning_still_slow/index.html new file mode 100644 index 0000000..fa5c298 --- /dev/null +++ b/details/even_partitioning_still_slow/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + Even Partitioning Yet Still Slow - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Even Partitioning Yet Still Slow

+

To see if a stage is evenly partitioned take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. Sometimes a stage with even partitioning is still slow.

+

There are a few common possible causes when the partitioning is even for slow stages. If your tasks are too short (e.g. finishing in under a few minutes), likely you have too many partitions/tasks. If your tasks are taking just the right amount of time but your jobs are slow you may not have enough executors. If your tasks are taking a long time you may have too large records, not enough partitions/tasks, or just slow functions. Another sign of not enough tasks can be excessive spill to disk.

+

If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. Insufficient partitioning can be fixed by increasing the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions).

+

Another cause of too large partitioning can be non-splittable compression formats, like gzip, that can be worked around with tools like splittablegzip.

+

Finally consider the possibility the records are too large.

+
+
+ + + + + + + + + + + + diff --git a/details/failed-to-read-non-parquet-file/index.html b/details/failed-to-read-non-parquet-file/index.html new file mode 100644 index 0000000..7c07d3e --- /dev/null +++ b/details/failed-to-read-non-parquet-file/index.html @@ -0,0 +1,377 @@ + + + + + + + + + + + Failed to read non-parquet file - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Failed to read non-parquet file

+

Iceberg does not perform validation on the files specified, so it will let you create a table pointing to non-supported formats, e.g. CSV data, but will fail at query time. In this case you need to use a different metastore (e.g. Hive)

+

If the data is stored in a supported format, it is also possible you have an invalid iceberg table.

+
+
+ + + + + + + + + + + + diff --git a/details/failure-executor-large-record/index.html b/details/failure-executor-large-record/index.html new file mode 100644 index 0000000..ee2c106 --- /dev/null +++ b/details/failure-executor-large-record/index.html @@ -0,0 +1,381 @@ + + + + + + + + + + + Large record problems can show up in a few different ways. - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Large record problems can show up in a few different ways.

+

For particularly large records you may find an executor out of memory exception, otherwise you may find slow stages.

+

You can get a Kryo serialization (for SQL) or Java serialization error (for RDD). In addition, if a given column in a row is too large you may encounter an IllegalArgumentException: Cannot grow BufferHolder by size, because the size after growing exceeds size limitation 2147483632.

+

Some common causes of too big records are groupByKey in RDD land, UDAFs or list aggregations (like collect_list) in Spark SQL, highly compressed or sparse records without a sparse serialization.

+

For sparse records, check out AltEncoder in [spark-misc-utils](https://github.com/holdenk/spark-misc-utils).

+

If you are uncertain of where exactly the too big record is coming from after looking at the executor logs, you can try and separate the stage which is failing into distinct parts of the code by using persist at the DISK_ONLY level to introduce cuts into the graph.

+

If your exception is happening with a Python UDF, it's possible that the individual records themselves might not be too large, but the batch-size used by Spark is set too high for the size of your records. You can try turning down the record size.

+
+
+ + + + + + + + + + + + diff --git a/details/forced-computations/index.html b/details/forced-computations/index.html new file mode 100644 index 0000000..621d5e6 --- /dev/null +++ b/details/forced-computations/index.html @@ -0,0 +1,389 @@ + + + + + + + + + + + Force computations - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Force computations

+

There are multiple use cases where you might want to measure performance for different transformations in your spark job, in which case you have to materialize the transformations by calling an explicit action. If you encounter an exception during the write phase that appears unrelated, one technique is to force computation earlier of the DataFrame or RDD to narrow down the true cause of the exception.

+

Forcing computation on RDDs is relatively simple, all you need to do is call count() and Spark will evaluate the RDD.

+

Forcing computation on DataFrames is more complex. Calling an action like count() on a DataFrame might not necessarily work because the optimizer will likely ignore unnecessary transformations. In order to compute the row count, Spark does not have to execute all transformations. The Spark optimizer can simplify the query plan in such a way that the actual transformation that you need to measure will be skipped because it is simply not needed for finding out the final count. In order to make sure all the transformations are called, we need to force Spark to compute them using other ways.

+

Here are some options to force Spark to compute all transformations of a DataFrame:

+
    +
  • df.rdd.count() : convert to an RDD and perform a count
  • +
  • df.foreach (_ => ()) : do-nothing foreach
  • +
  • Write to an output table (not recommended for performance benchmarking since the execution time will be impacted heavily by the actual writing process)
  • +
  • If using Spark 3.0 and above, benchmarking is simplified by supporting a "noop" write format which will force compute all transformations without having to write it. +
      df.write
    +  .mode("overwrite")
    +  .format("noop")
    +  .save()
  • +
+
+
+ + + + + + + + + + + + diff --git a/details/key-skew/index.html b/details/key-skew/index.html new file mode 100644 index 0000000..21a8fcc --- /dev/null +++ b/details/key-skew/index.html @@ -0,0 +1,419 @@ + + + + + + + + + + + Key/Partition Skew - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Key/Partition Skew

+

Key or partition skew is a frequent problem in Spark. Key skew can result in everything from slowly running jobs (with stragglers), to failing jobs.

+

What is data skew?

+
    +
  1. +

    Usually caused during a transformation when the data in one partition ends up being a lot more than the others, bumping up memory could resolve an OOM error but does not solve the underlying problem

    +
  2. +
  3. +

    When partitions are unbalanced by an order of magnitude, the largest partition becomes the bottleneck

    +
  4. +
+

How to identify skew

+
    +
  1. If one task took much longer to complete than the other tasks, it's usually a sign of Skew. On the Spark UI under Summary Metrics for completed tasks if the Max duration is higher by a significant magnitude from the Median it usually represents Skew, e.g.: +Key-Skew-Spark-UI
  2. +
+

Things to consider

+
    +
  1. Mitigating skew has a cost (e.g. repartition) hence it's ignorable unless the duration or input size is significantly higher in magnitude, severely impacting job time
  2. +
+

Mitigation strategies

+
    +
  1. +

    Increasing executor memory to prevent OOM exceptions -> This a short-term solution if you want to unblock yourself but does not address the underlying issue. Sometimes this is not an option when you are already running at the max memory settings allowable.

    +
  2. +
  3. +

    Salting is a way to balance partitions by introducing a salt/dummy key for the skewed partitions. Here is a sample workbook and an example of salting in content performance show completion pipeline, where the whole salting operation is parametrized with a JOIN_BUCKETS variable which helps with maintenance of this job. +Spark-Salted-UI

    +
  4. +
  5. +

    Isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results

    +
  6. +
  7. +

    Adaptive Query Execution is a new framework with Spark 3.0, it enables Spark to dynamically identify skew. Under the hood adaptive query execution splits (and replicates if needed) skewed (large) partitions. If you are unable to upgrade to Spark 3.0, you can build the solution into the code by using the Salting/Partitioning technique listed above.

    +
  8. +
  9. +

    Using approximate functions/ probabilistic data structure

    +
  10. +
  11. +

    Using approximate distinct counts (Hyperloglog) can help get around skew if absolute precision isn't important.

    +
  12. +
+

Approximate data structures like Tdigest can help with quantile computations. +If you need exact quantiles, check out the example in High Performance Spark

+

Certain types of aggregations and windows can result in partitioning the data on a particular key.

+
+
+ + + + + + + + + + + + diff --git a/details/notenoughexecs/index.html b/details/notenoughexecs/index.html new file mode 100644 index 0000000..fb79209 --- /dev/null +++ b/details/notenoughexecs/index.html @@ -0,0 +1,371 @@ + + + + + + + + + + + Notenoughexecs - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Not enough execs

+
+
+ + + + + + + + + + + + diff --git a/details/partial_aggregates/index.html b/details/partial_aggregates/index.html new file mode 100644 index 0000000..8a4c0f9 --- /dev/null +++ b/details/partial_aggregates/index.html @@ -0,0 +1,378 @@ + + + + + + + + + + + Partial v.s. Full Aggregates - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Partial v.s. Full Aggregates

+

Partial Aggregation is a key concept when handling large amounts of data in Spark. Full aggregation means that all of the data for one key must be together on the same node and then it can be aggregated, whereas partial aggregation allows Spark to start the aggregation "map-side" (e.g. before the shuffle) and then combine these "partial" aggregations together.

+

In RDD world the classic "full" aggregation is groupByKey and partial aggregation is reduceByKey.

+

In DataFrame/Datasets, Scala UDAFs implement partial aggregation but the basic PySpark Panda's/Arrow UDAFs do not support partial aggregation.

+
+
+ + + + + + + + + + + + diff --git a/details/pyudfoom/index.html b/details/pyudfoom/index.html new file mode 100644 index 0000000..6b1395b --- /dev/null +++ b/details/pyudfoom/index.html @@ -0,0 +1,393 @@ + + + + + + + + + + + PySpark UDF / UDAF OOM - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

PySpark UDF / UDAF OOM

+

Out of memory exceptions with Python user-defined-functions are especially likely as Spark doesn't do a good job of managing memory between the JVM and Python VM. Together this can result in exceeding container memory limits.

+

Grouped Map / Co-Grouped

+

The Grouped & Co-Grouped UDFs are especially likely to cause out-of-memory exceptions in PySpark when combined with key skew. +Unlike most built in Spark aggregations, PySpark user-defined-aggregates do not support partial aggregation. This means that all of the data for a single key must fit in memory. If possible try and use an equivalent built-in aggregation, write a Scala aggregation supporting partial aggregates, or switch to an RDD and use reduceByKey.

+

This limitation applies regardless of whether you are using Arrow or "vanilla" UDAFs.

+

Arrow / Pandas / Vectorized UDFS

+

If you are using PySpark's not-so-new Arrow based UDFS (sometimes called pandas UDFS or vectorized UDFs), record batching can cause issues. You can configure spark.sql.execution.arrow.maxRecordsPerBatch, which defaults to 10k records per batch. If your records are large this default may very well be the source of your out of memory exceptions.

+

Note: setting spark.sql.execution.arrow.maxRecordsPerBatch too-small will result in reduced performance and reduced ability to vectorize operations over the data frames.

+

mapInPandas / mapInArrow

+

If you use mapInPandas or mapInArrow (proposed in 3.3+) it's important to note that Spark will serialize entire records, not just the columns needed by your UDF. If you encounter OOMs here because of record sizes, one option is to minimize the amount of data being serialized in each record. Select only the minimal data needed to perform the UDF + a key to rejoin with the target dataset.

+
+
+ + + + + + + + + + + + diff --git a/details/read-partition-issue/index.html b/details/read-partition-issue/index.html new file mode 100644 index 0000000..f648ebb --- /dev/null +++ b/details/read-partition-issue/index.html @@ -0,0 +1,413 @@ + + + + + + + + + + + Partition at read time - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Partition at read time

+

We're used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. Note that the number of partitions can be optionally specified when using the read API.

+

How to decide on a partition column or partition key?

+
    +
  1. +

    Does the key have relatively low cardinality? +1k distinct values are better than 1M distinct values. +Consider a numeric, date, or timestamp column.

    +
  2. +
  3. +

    Does the key have enough data in each partition?

    +
    +

    1Gb is a good goal.

    +
    +
  4. +
  5. +

    Does the key have too much data in each partition? +The data must fit on a single task in memory and avoid spilling to disk.

    +
  6. +
  7. +

    Does the key have evenly distributed data in each partition? +If some partitions have orders of magnitude more data than others, those larger partitions have the potential to spill to disk, OOM, or simply consume excess resources in comparison to the partitions with median amounts of data. You don't want to size executors for the bloated partition. If none of the columns or keys has a particularly even distribution, then create a new column at the expense of saving a new version of the table/RDD/DF. A frequent approach here is to create a new column using a hash based on existing columns.

    +
  8. +
  9. +

    Does the key allow for fewer wide transformations? +Wide transformations are more costly than narrow transformations.

    +
  10. +
  11. +

    Does the number of partitions approximate 2-3x the number of allocated cores on the executors?

    +
  12. +
+

Reference links

+

Learning Spark +High Performance Spark

+
+
+ + + + + + + + + + + + diff --git a/details/revise-bad_partitioning/index.html b/details/revise-bad_partitioning/index.html new file mode 100644 index 0000000..8109a1c --- /dev/null +++ b/details/revise-bad_partitioning/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Bad Partitioning - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Bad Partitioning

+

There are three main different types and causes of bad partitioning in Spark. Partitioning is often the limitation of parallelism for most Spark jobs.

+

The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. With key-skew the problem is not the number of partitions, but that the data is not evenly distributed amongst the partitions. The most frequent cause of skewed partitioning is that of "key-skew." This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null).

+

The other type of skewed partitioning comes from "input partitioned" data which is not evenly partitioned. With input partitioned data, the RDD or Dataframe doesn't have a particular partitioner; it just matches however the data is stored on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. This input partitioned data can also be skewed due to key-skew if the data is written out partitioned on a skewed key.

+

Insufficient partitioning is similar to input skewed partitioning, except instead of skew there just are not enough partitions. Similarly, this can be fixed by increasing the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions).

+
+
+ + + + + + + + + + + + diff --git a/details/revise-even_partitioning_still_slow/index.html b/details/revise-even_partitioning_still_slow/index.html new file mode 100644 index 0000000..664a472 --- /dev/null +++ b/details/revise-even_partitioning_still_slow/index.html @@ -0,0 +1,382 @@ + + + + + + + + + + + Even Partitioning Yet Still Slow - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Even Partitioning Yet Still Slow

+

To see if a stage is evenly partitioned, take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. Sometimes a stage with even partitioning is still slow.

+

If the max task duration is still substantially shorter than the stage's overall duration, this is often a sign of an insufficient number of executors. Spark can run (at most) spark.executor.cores * spark.dynamicAllocation.maxExecutors tasks in parallel (and in practice this will be lower since some tasks will be speculatively executed and some executors will fail). Try increasing the maxExecutors and seeing if your job speeds up.

+
+

Note

+

Setting spark.executor.cores * spark.dynamicAllocation.maxExecutors in excess of cluster capacity can result in the job waiting in PENDING state. So, try increasing maxExecutors within the limitations of the cluster resources and check if the job runtime is faster given the same input data.

+
+

If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. See Bad Partitioning.

+
+
+ + + + + + + + + + + + diff --git a/details/slow-executor/index.html b/details/slow-executor/index.html new file mode 100644 index 0000000..b3a20ec --- /dev/null +++ b/details/slow-executor/index.html @@ -0,0 +1,381 @@ + + + + + + + + + + + Slow executor - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow executor

+

There can be many reasons executors are slow; here are a few things you can look into:

+
    +
  • Performance distribution among tasks in the same stage: In Spark UI - Stages - Summary Metric: check if there's uneven distribution of duration / input size. If true, there may be data skews or uneven partition splits. See uneven partitioning.
  • +
  • Task size: In Spark UI - Stages - Summary Metrics, check the input/output size of tasks. If individual input or output tasks are larger than a few hundred megabytes, you may need more partitions. Try increasing spark.sql.shuffle.partitions or spark.sql.files.maxPartitionBytes or consider making a repartition call.
  • +
  • GC: Check if GC time is a small fraction of duration; if it's more than a few percent, try increasing executor memory and see if it makes any difference. If adding memory is not helping, you can now see if any optimization can be done in your code for that stage.
  • +
+
+
+ + + + + + + + + + + + diff --git a/details/slow-job-slow-cluster/index.html b/details/slow-job-slow-cluster/index.html new file mode 100644 index 0000000..fcdc97a --- /dev/null +++ b/details/slow-job-slow-cluster/index.html @@ -0,0 +1,377 @@ + + + + + + + + + + + Slow job slow cluster - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow Cluster

+

How do I know if and when my job is waiting for cluster resources?

+

Sometimes the cluster manager may choke or otherwise not be able to allocate resources and we don't have a good way of detecting this situation making it difficult for the user to debug and tell apart from Spark not scaling up correctly.

+

As of Spark 3.4, an executor will note when and for how long it waits for cluster resources. Check the JVM metrics for this information.

+ +

https://issues.apache.org/jira/browse/SPARK-36664

+
+
+ + + + + + + + + + + + diff --git a/details/slow-job/index.html b/details/slow-job/index.html new file mode 100644 index 0000000..44b3611 --- /dev/null +++ b/details/slow-job/index.html @@ -0,0 +1,380 @@ + + + + + + + + + + + Slow job - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow job

+

A Spark job can be slow for various reasons, but here are a couple of common ones:

+
    +
  1. Slow stage(s): Go to Slow Stage section to identify the slow stage. In most cases, a job is slow because one or more of the stages are slow.
  2. +
  3. Too big DAG: Go to TooBigDAG section for more details on this topic
  4. +
+
+
+ + + + + + + + + + + + diff --git a/details/slow-map/index.html b/details/slow-map/index.html new file mode 100644 index 0000000..e4609ae --- /dev/null +++ b/details/slow-map/index.html @@ -0,0 +1,389 @@ + + + + + + + + +Slow Map - Spark Advanced Topics + + + + + + + + + +
+
+
+
+

Slow Map

+

Below is a list of reasons why your map stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios.

+
flowchart LR + +SlowMap[Slow Read / Map] + +SlowMap --> SLOWEXEC[Slow executor] +SlowMap --> EVENPART_SLOW[Even partitioning] +SlowMap --> SkewedMapTasks[Skewed Map Tasks and uneven partitioning] + +EVENPART_SLOW --> MissingSourcePredicates[Reading more data than needed] +EVENPART_SLOW --> TooFewMapTasks[Not enough Read/Map Tasks] +EVENPART_SLOW --> TooManyMapTasks[Too many Read/Map Tasks] +EVENPART_SLOW --> SlowTransformations[Slow Transformations] +EVENPART_SLOW --> UDFSLOWNESS[Slow UDF] + +SkewedMapTasks --> RecordSkew[Record Skew] +SkewedMapTasks --> TaskSkew[Task skew] +TaskSkew --> READPARTITIONISSUES[Read partition issues] +MissingSourcePredicates --> FILTERNOTPUSHED[Filter not pushed] + +click EVENPART_SLOW "../../details/even_partitioning_still_slow" +click SLOWEXEC "../../details/slow-executor" +click SkewedMapTasks "../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning" +click RecordSkew "../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning" +click TaskSkew "../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning" +click MissingSourcePredicates "../../details/slow-map/#reading-more-data-than-needed" + +click UDFSLOWNESS "../../details/udfslow" +click LARGERECORDS "../../details/failure-executor-large-record" + +click TooFewMapTasks "../../details/slow-map/#not-enough-readmap-tasks" +click TooManyMapTasks "../../details/slow-map/#too-many-readmap-tasks" +click SlowTransformations "../../details/slow-map/#slow-transformations" + +click FILTERNOTPUSHED "../../details/slow-partition_filter_pushdown" +click SLOWEXEC "../../details/slow-executor" +click READPARTITIONISSUES "../../details/read-partition-issue" + +
+

Reading more data than needed

+

Iceberg/Parquet provides 3 layers of data pruning/filtering, so it is recommended to make the most of it by utilizing them as upstream in your ETL as possible.

+
    +
  • Partition Pruning : Applying a filter on a partition column would mean the Spark can prune all the partitions that are not needed (ex: utc_date, utc_hour etc.). Refer to this section for some examples.
  • +
  • Column Pruning : Parquet, a columnar format, allows us to read specific columns from a row group without having to read the entire row. By selecting the fields that you only need for your job/sql(instead of "select *"), you can avoid bringing unnecessary data only to drop it in the subsequent stages.
  • +
  • Predicate Push Down: It is also recommended to use filters on non-partition columns as this would allow Spark to exclude specific row groups while reading data from S3. For ex: account_id is not null if you know that you would be dropping the NULL account_ids eventually.
  • +
+

See also filter not pushed down, aggregation not pushed down(todo: add details), Bad storage partitioning(todo: add details).

+

Not enough Read/Map Tasks

+

If your map stage is taking longer, and you are sure that you are not reading more data than needed, then you may be reading the data with small no. of tasks. You can increase the no. of map tasks by decreasing target split size. Note that if you are constrained by the resources(map tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors

+

Too many Read/Map Tasks

+

If you have a large no. of map tasks in your stage, you could run into driver memory related errors as the task metadata could overwhelm the driver. This also could put a stress on shuffle (on the map side) as more map tasks would create more shuffle blocks. It is recommended to keep the task count for a stage under 80k. You can decrease the no. of map tasks by increasing target split size (todo: add detail) for an Iceberg table. (Note: For a non-iceberg table, the property is spark.sql.files.maxPartitionBytes and it is at the job level and not at the table level)

+

Slow Transformations

+

Slow-running map tasks can have many causes; some common ones include:

+
    +
  • Regex : You have RegEx in your transformation. Refer to RegEx tips for tuning.
  • +
  • udf: Make sure you are sending only the data that you need in UDF and tune UDF for performance. Refer to Slow UDF for more details.
  • +
  • Json: TBD
  • +
+

All these transformations may run into skew issues if you have a single row/column that is bloated. You could prevent this by checking the payload size before calling the transformation as a single row/column could potentially slow down the entire stage.

+

Skewed Map Tasks or Uneven partitioning

+

The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. The data is not evenly distributed amongst the partitions.

+
    +
  • +

    Uneven partitioning due to Key-skew : The most frequent cause of skewed partitioning is that of "key-skew." This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null).

    +
  • +
  • +

    Uneven partitioning due to input layout: We are used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. In cases where the RDD or Dataframe doesn't have a particular partitioner, data is partitioned according to the storage on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. Spark is often able to avoid input layout issues by combining and splitting inputs (when input formats are "splittable"), but not all input formats give Spark this freedom. One common example is gzip, although there is a work-around for "splittable gzip" but this comes at the cost of decompressing the entire file multiple times.

    +
  • +
  • +

    Record Skew : A single bloated row/record could be the root cause for slow map task. The easiest way to identify this is by checking your string fields that has Json payload. ( Ex: A bug in a client could write a lot of data). You can identify the culprit by checking the max(size/length) of the field in your upstream table. For CL, snapshot is a candidate for bloated field.

    +
  • +
  • +

    Task Skew : **This is only applicable to the tables with non-splittable file format(like TEXT, zip) and parquet files should never run into this issue. Task skew is where one of the tasks got more rows than others and it is possible if the upstream table has a single file that is large and has the non-splittable format.

    +
  • +
+
+
+ + + + + + + + + diff --git a/details/slow-partition_filter_pushdown/index.html b/details/slow-partition_filter_pushdown/index.html new file mode 100644 index 0000000..2c33b4a --- /dev/null +++ b/details/slow-partition_filter_pushdown/index.html @@ -0,0 +1,407 @@ + + + + + + + + + + + Partition Filters - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Partition Filters

+

Processing more data than necessary will typically slow down the job. +If the input table is partitioned then applying filters on the partition columns can restrict the input volume Spark + needs + to scan.

+

A simple equality filter gets pushed down to the batch scan and enables Spark to only scan the files +where dateint = 20211101 of a sample table partitioned on dateint and hour.

+

select *
+from jlantos.sample_table
+where dateint = 20211101
+limit 100
+Successful-Filter_Pushdown

+

Examples when the filter does not get pushed down

+

The filter contains an expression

+

If instead of a particular date we'd like to load data from the 1st of any month we might + rewrite the above query such as:

+
select *
+from jlantos.sample_table
+where dateint % 100 = 1
+limit 100
+

The query plan shows that Spark in this case scans the whole table and filters only in a later step.

+

Successful-Filter_Pushdown

+

Filter is dynamic via a join

+

In a more complex job we might restrict the data based on joining to another table. If the filtering criteria is not + static it won't be pushed down to the scan. So in the example below the two table scans happen independently, and + min(dateint) calculated in the CTE won't have an effect on the second scan.

+
with dates as
+  (select min(dateint) dateint
+   from jlantos.sample_table)
+
+select *
+from jlantos.sample_table st
+join dates d on st.dateint = d.dateint
+
+
+ + + + + + + + + + + + diff --git a/details/slow-reduce/index.html b/details/slow-reduce/index.html new file mode 100644 index 0000000..6b67926 --- /dev/null +++ b/details/slow-reduce/index.html @@ -0,0 +1,411 @@ + + + + + + + + + + + Slow reduce - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow Reduce

+

Below is a list of reasons why your reduce stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios.

+
    +
  1. Not Enough Shuffle Tasks
  2. +
  3. Too many shuffle tasks
  4. +
  5. Skewed Shuffle Tasks
  6. +
  7. Spill To Disk
  8. +
+

Not Enough Shuffle Tasks

+

The default shuffle parallelism for our Spark cluster is 500, and it may not be enough for larger datasets. If you don't see skew and most/all of the tasks are taking really long to finish a reduce stage, you can improve the overall runtime by increasing the spark.sql.shuffle.partitions.

+

Note that if you are constrained by the resources(reduce tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors

+

Too many shuffle tasks

+

While having too many shuffle tasks has no direct effect on the stage duration, it could slow the stage down if there are multiple retries during the shuffle stage due to shuffle fetch failures. Note that the higher the shuffle partitions, the more chances of running into FetchFailure exceptions.

+

Skewed Shuffle Tasks

+

Partitioning problems are often the limitation of parallelism for most Spark jobs.

+

There are two primary types of bad partitioning, skewed partitioning (where the partitions are not equal in size/work) or even but non-ideal number partitioning (where the partitions are equal in size/work). If your tasks are taking roughly equivalent times to complete then you likely have even partitioning, and if they are taking unequal times to complete then you may have skewed or uneven partitioning.

+

What is skew and how to identify skew. Skew is typically from one of the below stages:

+
    +
  • +

    Join: Skew is natural in most of our data sets due to the nature of the data. Both Hash join and Sort-Merge join can run into skew issue if you have a lot of data for one or more keys on either side of the join. Check Skewed Joins for handling skewed joins with example.

    +
  • +
  • +

    Aggregation/Group By: All aggregate functions(UDAFs) using SQL/dataframes/Datasets implement partial aggregation(combiner in MR) so you would only run into a skew if you are using a non-algebraic functions like distinct and percentiles which can't be computed partially. Partial vs Full aggregates

    +
  • +
  • +

    Sort/Repartition/Coalesce before write: It is recommended to introduce an additional stage for Sort or Repartition or Coalesce before the write stage to write optimal no. of S3 files into your target table. CheckSkewed Write for more details.

    +
  • +
+

Slow Aggregation

+

Below non-algebraic functions can slow down the reduce stage if you have too many values/rows for a given key.

+
    +
  • Count Distinct: Use HyperLogLog(HLL) based sketches for cardinality if you just need the approx counts for trends and don't need the exact counts. HLL can estimate with a standard error of 2%.
  • +
  • Percentiles: Use approx_percentile or t-digest sketches which would speed up the computation for a small accuracy trade-off.
  • +
+

Spill To Disk

+

Spark executors will start using "disk" once they exceed the spark memory fraction of executor memory. This itself is not an issue, but too much "spill to disk" will slow down the stage/job. You can overcome this by either increasing the executor memory or tweaking the job/stage to consume less memory. (for ex: a Sort-Merge join requires a lot less memory than a Hash join)

+
+
+ + + + + + + + + + + + diff --git a/details/slow-regex-tips/index.html b/details/slow-regex-tips/index.html new file mode 100644 index 0000000..cba51de --- /dev/null +++ b/details/slow-regex-tips/index.html @@ -0,0 +1,414 @@ + + + + + + + + + + + Regular Expression Tips - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Regular Expression Tips

+

Spark function regexp_extract and regexp_replace can transform data using regular expressions. +The regular expression pattern follows Java regex pattern.

+

Task Running Very Slowly

+

Stack trace shows:

+
java.lang.Character.codePointAt(Character.java:4884)
+java.util.regex.Pattern$CharProperty.match(Pattern.java:3789)
+java.util.regex.Pattern$Curly.match1(Pattern.java:4307)
+java.util.regex.Pattern$Curly.match(Pattern.java:4250)
+java.util.regex.Pattern$GroupHead.match(Pattern.java:4672)
+java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812)
+java.util.regex.Pattern$Curly.match0(Pattern.java:4286)
+java.util.regex.Pattern$Curly.match(Pattern.java:4248)
+java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812)
+java.util.regex.Pattern$Curly.match0(Pattern.java:4286)
+java.util.regex.Pattern$Curly.match(Pattern.java:4248)
+java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812)
+java.util.regex.Pattern$Curly.match0(Pattern.java:4286)
+java.util.regex.Pattern$Curly.match(Pattern.java:4248)
+java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812)
+java.util.regex.Pattern$Curly.match0(Pattern.java:4286)
+java.util.regex.Pattern$Curly.match(Pattern.java:4248)
+java.util.regex.Pattern$Start.match(Pattern.java:3475)
+java.util.regex.Matcher.search(Matcher.java:1248)
+java.util.regex.Matcher.find(Matcher.java:637)
+org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificUnsafeProjection.RegExpExtract_2$(Unknown Source)
+

Certain values in the dataset cause regexp_extract with a certain regex pattern to run very slowly. +See https://stackoverflow.com/questions/5011672/java-regular-expression-running-very-slow.

+

Match Special Character in PySpark

+

You will need 4 backslashes to match any special character, +2 required by Python string escaping and 2 by Java regex parsing.

+
df = spark.sql("SELECT regexp_replace('{{template}}', '\\\\{\\\\{', '#')")
+
+
+ + + + + + + + + + + + diff --git a/details/slow-skewed-join/index.html b/details/slow-skewed-join/index.html new file mode 100644 index 0000000..c531eab --- /dev/null +++ b/details/slow-skewed-join/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Skewed Joins - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Skewed Joins

+

Skewed joins happen frequently as some locations (NYC), data (null), and titles (Mr. Farts - Farting Around The House) are more popular than other types of data.

+

To a certain degree the Spark 3.3 query engine has improvements to handle skewed joins, so a first step should be attempting to upgrade to the most recent version of Spark.

+

Broadcast joins are ideal for handling skewed joins, but they only work when one table is smaller than the other. A general, albeit hacky, solution is to isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results.

+

Other techniques can include introducing some type of salting and doing multi-stage joins.

+
+
+ + + + + + + + + + + + diff --git a/details/slow-skewed-write/index.html b/details/slow-skewed-write/index.html new file mode 100644 index 0000000..58f9204 --- /dev/null +++ b/details/slow-skewed-write/index.html @@ -0,0 +1,400 @@ + + + + + + + + + + + Skewed/Slow Write - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Skewed/Slow Write

+

Writes can be slow depending on the preceding stage of write(), target table partition scheme, and write parallelism(spark.sql.shuffle.partitions). +The goal of this article is to go through below options and see the most optimal transformation for writing optimal files in target table/partition.

+

When to use Sort

+

A global sort in Spark internally uses range-partitioning to assign sort keys to a partition range. This involves collecting sample rows (reservoir sampling) from input partitions and sending them to the driver for computing range boundaries.

+

Use global sort

+
    +
  • If you are writing multiple partitions(especially heterogeneous partitions) as part of your write() as it can estimate the no. of files/tasks for a given target table partition based on the no. of sample rows it observes.
  • +
  • If you want to enable predicate-push-down on a set of target table fields for down stream consumption.
  • +
+

Tips: +1. You can increase the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to improve the estimates if you are not seeing an optimal no. of files per partition. +2. You can also introduce salt to sort keys to increase the no. of write tasks if the sort keys' cardinality is less than spark.sql.shuffle.partitions. Example

+

When to use Repartition

+

Repartition (hash partitioning) partitions rows in a round-robin manner to produce a uniform distribution across the tasks, and a hash partitioning just before the write would produce uniform files, so all write tasks should take about the same time.

+

Use repartition

+
    +
  • If you are writing into a single partition or a non-partitioned table and want to get uniform file sizes.
  • +
  • If you want to produce a specific no. of files. For ex: using repartition(100) would generate up to 100 files.
  • +
+

When to use Coalesce

+

Coalesce tries to combine files without invoking a shuffle and is useful when you are going from a higher parallelism to lower parallelism. Use Coalesce:

+
    +
  • If you are writing very small no. of files and the file size is relatively small.
  • +
+

Note that Coalesce(N) is not an optimal way to merge files, as it tries to combine multiple files (until it reaches the target no. of files 'N') without taking size into account, and you could run into (org.apache.spark.memory.SparkOutOfMemoryError: Unable to acquire 65536 bytes of memory, got 0) if the combined size exceeds the available memory.

+
+
+ + + + + + + + + + + + diff --git a/details/slow-stage/index.html b/details/slow-stage/index.html new file mode 100644 index 0000000..f46c996 --- /dev/null +++ b/details/slow-stage/index.html @@ -0,0 +1,465 @@ + + + + + + + + + + + Identify the slow stage - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Identify the slow stage

+

When you have an event log from an earlier "good run"

+

You can compare the slow and the fast runs. +For this you can even use your local pyspark and calculate a ratio between slow and fast run for each stage metrics:

+
# Helper methods (just copy-paste it)
+
+def createEventView(eventLogFile, eventViewName):
+  sql("CREATE OR REPLACE TEMPORARY VIEW {} USING org.apache.spark.sql.json OPTIONS (path '{}')".format(eventViewName, eventLogFile))
+
+
+def createStageMetricsView(eventViewName, stageMetricsViewName):
+  sql("CREATE OR REPLACE TEMPORARY VIEW {} AS select `Submission Time`, `Completion Time`, `Stage ID`, t3.col.* from (select `Stage Info`.* from {} where Event='SparkListenerStageCompleted') lateral view explode(Accumulables) t3".format(stageMetricsViewName, eventViewName))
+
+
+def showDiffInStage(fastStagesTable, slowStagesTable, stageID):
+  sql("select {fastStages}.Name, {fastStages}.Value as Fast, {slowStages}.Value as Slow, {slowStages}.Value / {fastStages}.Value as `Slow / Fast` from {fastStages} INNER JOIN {slowStages} ON {fastStages}.ID = {slowStages}.ID where {fastStages}.`Stage ID` = {stageID} and {slowStages}.`Stage ID` = {stageID}".format(fastStages=fastStagesTable, slowStages=slowStagesTable, stageID=stageID)).show(40, False)
+
+
+# Creating the views from the event logs (just an example, you have to specify your own paths)
+
+createEventView("<path_to_the_fast_run_event_log>", "FAST_EVENTS")
+createStageMetricsView("FAST_EVENTS", "FAST_STAGE_METRICS")
+
+createEventView("<path_to_the_slow_run_event_log>", "SLOW_EVENTS")
+createStageMetricsView("SLOW_EVENTS", "SLOW_STAGE_METRICS")
+
+>>> sql("SELECT DISTINCT `Stage ID` from FAST_STAGE_METRICS").show()
++--------+
+|Stage ID|
++--------+
+|       0|
+|       1|
+|       2|
++--------+
+
+>>> sql("SELECT DISTINCT `Stage ID` from SLOW_STAGE_METRICS").show()
++--------+
+|Stage ID|
++--------+
+|       0|
+|       1|
+|       2|
++--------+
+
+>>> showDiffInStage("FAST_STAGE_METRICS", "SLOW_STAGE_METRICS", 2)
++-------------------------------------------+-------------+-------------+------------------+
+|Name                                       |Fast         |Slow         |Slow / Fast       |
++-------------------------------------------+-------------+-------------+------------------+
+|scan time total (min, med, max)            |1095931      |1628308      |1.485776020570638 |
+|internal.metrics.executorRunTime           |7486648      |12990126     |1.735105750931525 |
+|duration total (min, med, max)             |7017645      |12322243     |1.7558943206731032|
+|internal.metrics.jvmGCTime                 |220325       |1084412      |4.921874503574266 |
+|internal.metrics.output.bytesWritten       |34767744411  |34767744411  |1.0               |
+|internal.metrics.input.recordsRead         |149652381    |149652381    |1.0               |
+|internal.metrics.executorDeserializeCpuTime|5666230304   |7760682789   |1.3696377260771504|
+|internal.metrics.resultSize                |625598       |626415       |1.0013059504665935|
+|internal.metrics.executorCpuTime           |6403420405851|8762799691603|1.3684560963069305|
+|internal.metrics.input.bytesRead           |69488204276  |69488204276  |1.0               |
+|number of output rows                      |149652381    |149652381    |1.0               |
+|internal.metrics.resultSerializationTime   |36           |72           |2.0               |
+|internal.metrics.output.recordsWritten     |149652381    |149652381    |1.0               |
+|internal.metrics.executorDeserializeTime   |6024         |11954        |1.9843957503320053|
++-------------------------------------------+-------------+-------------+------------------+
+

When there is no event log from a good run

+

Steps:

+
    +
  1. Navigate to Spark UI using spark history URL
  2. +
  3. Click on Stages and sort the stages(click on Duration) in descending order to find the longest running stage.
  4. +
+

IdentifySlowStage

+

Now let's figure out if the slow stage is a Map or Reduce/Shuffle

+

Once you identify the slow stage, check the fields "Input", "Output", "Shuffle Read", "Shuffle Write" of the slow stage and use below grid to identify the stage type and the corresponding ETL action.

+
 -----------------------------------------------------------------------------------
+| Input | Output | Shuffle Read | Shuffle Write |  MR Stage  |  ETL Action          |
+|------------------------------------------------------------|----------------------|
+|   X   |        |              |       X       |    Map     |     Read             |
+|------------------------------------------------------------|----------------------|
+|   X   |    X   |              |               |    Map     |   Read/Write         |
+|------------------------------------------------------------|----------------------|
+|   X   |        |              |               |    Map     | Sort Estimate        |
+|------------------------------------------------------------|----------------------|
+|       |        |      X       |               |    Map     | Sort Estimate        |
+|------------------------------------------------------------|----------------------|
+|       |        |      X       |       X       |   Reduce   | Join/Agg/Repartition |
+|------------------------------------------------------------|----------------------|
+|       |    X   |      X       |               |   Reduce   |     Write            |
+ ------------------------------------------------------------|----------------------
+
+
+

go to Map if the slow stage is from a Map operation. +go to Reduce if the slow stage is from a Reduce/Shuffle operation.

+
+
+ + + + + + + + + + + + diff --git a/details/slow-writes-s3/index.html b/details/slow-writes-s3/index.html new file mode 100644 index 0000000..50cf288 --- /dev/null +++ b/details/slow-writes-s3/index.html @@ -0,0 +1,386 @@ + + + + + + + + + + + Slow writes on S3 - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow writes on S3

+

Using the default file output committer with S3a results in double data writes (sad times!). +Use a newer cloud committer such as the "S3 magic committer" or a committer specialized for your hadoop cluster.

+

Alternatively, write to Apache Iceberg, Delta.io, or Apache Hudi.

+

Reference links

+

S3 Magic Committer blog and Hadoop documentation

+

EMRFS S3-optimized Committer

+
+
+ + + + + + + + + + + + diff --git a/details/slow-writes-too-many-files/index.html b/details/slow-writes-too-many-files/index.html new file mode 100644 index 0000000..2ab0b66 --- /dev/null +++ b/details/slow-writes-too-many-files/index.html @@ -0,0 +1,384 @@ + + + + + + + + + + + Slow writes due to Too many small files - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow writes due to Too many small files

+

Sometimes a partitioning approach works fine for a small dataset, but can cause a surprisingly large number of partitions for a slightly larger dataset. Check out The Small File Problem in context of HDFS.

+

Relevant links

+

HDFS: +The Small File Problem: Partition strategies to avoid IO limitations

+
+
+ + + + + + + + + + + + diff --git a/details/slow-writes/index.html b/details/slow-writes/index.html new file mode 100644 index 0000000..869ea0c --- /dev/null +++ b/details/slow-writes/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + Slow Writes - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Slow Writes

+

The Shuffle Write time is visible as follows:

+

Spark UI --> Stages Tab --> Stages Detail --> Event timeline.

+

Symptom: my spark job is spending more time writing files to disk on shuffle writes.

+

Some potential causes:

+
+
+
+ + + + + + + + + + + + diff --git a/details/toobigdag/index.html b/details/toobigdag/index.html new file mode 100644 index 0000000..4ce5ece --- /dev/null +++ b/details/toobigdag/index.html @@ -0,0 +1,385 @@ + + + + + + + + + + + Too Big DAG (or when iterative algorithms go bump in the night) - Spark Advanced Topics + + + + + + + + + + + +
+
+ +
+ +

Too Big DAG (or when iterative algorithms go bump in the night)

+

Spark uses lazy evaluation and creates a DAG (directed acyclic graph) of the operations needed to compute a piece of data. Even if the data is persisted or cached, Spark will keep this DAG in memory on the driver so that if an executor fails it can re-create this data later. This is more likely to cause problems with iterative algorithms that create RDDs or DataFrames on each iteration based on the previous iteration, like ALS. Some signs of a DAG getting too big are:

+
    +
  • Iterative algorithm becoming slower on each iteration
  • +
  • Driver OOM
  • +
  • Executor out-of-disk-error
  • +
+

If your job hasn't crashed, an easy way to check is by looking at the Spark Web UI and seeing what the DAG visualization looks like. If the DAG takes a measurable length of time to load (minutes), or fills a few screens it's likely "too-big." Just because a DAG "looks" small though doesn't mean that it isn't necessarily an issue, medium-sized-looking DAGs with lots of shuffle files can cause executor out of disk issues too.

+

Working around this can be complicated, but there are some tools to simplify it. The first is Spark's checkpointing which allows Spark to "forget" the DAG so far by writing the data out to a persistent storage like S3 or HDFS. The second is manually doing what checkpointing does, that is on your own writing the data out and loading it back in.

+

Unfortunately, if you work in a notebook environment this might not be enough to solve your problem. While this will introduce a "cut" in the DAG, if the old RDDs or DataFrames/Datasets are still in scope they will still continue to reside in memory on the driver, and any shuffle files will continue to reside on the disks of the workers. To work around this it's important to explicitly clean up your old RDDs/DataFrames by setting their references to None/null.

+

If you still run into executor out of disk space errors, you may need to look at the approach taken in Spark's ALS algorithm of triggering eager shuffle cleanups, but this is an advanced feature and can lead to non-recoverable errors.

+
+
+ + + + + + + + + + + + diff --git a/details/toofew_tasks/index.html b/details/toofew_tasks/index.html new file mode 100644 index 0000000..0191b64 --- /dev/null +++ b/details/toofew_tasks/index.html @@ -0,0 +1,371 @@ + + + + + + + + + + + Toofew tasks - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Too few tasks

+
+
+ + + + + + + + + + + + diff --git a/details/toomany_tasks/index.html b/details/toomany_tasks/index.html new file mode 100644 index 0000000..68e560f --- /dev/null +++ b/details/toomany_tasks/index.html @@ -0,0 +1,371 @@ + + + + + + + + + + + Toomany tasks - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Too many tasks

+
+
+ + + + + + + + + + + + diff --git a/details/udfslow/index.html b/details/udfslow/index.html new file mode 100644 index 0000000..a3a5eee --- /dev/null +++ b/details/udfslow/index.html @@ -0,0 +1,377 @@ + + + + + + + + + + + Avoid UDFs for the most part - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Avoid UDFs for the most part

+

User defined functions in Spark are black boxes to Spark and can limit performance. When possible look for built-in alternatives.

+

One important exception is that if you have multiple functions which must be done in Python, the advice changes a little bit. Since moving data from the JVM to Python is expensive, if you can chain together multiple Python UDFs on the same column, Spark is able to pipeline these together into a single copy to/from Python.

+
+
+ + + + + + + + + + + + diff --git a/details/write-fails/index.html b/details/write-fails/index.html new file mode 100644 index 0000000..6297bdc --- /dev/null +++ b/details/write-fails/index.html @@ -0,0 +1,379 @@ + + + + + + + + + + + Write Fails - Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Write Fails

+

Write failures can sometimes mask other problems. A good first step is to insert a cache or persist right before the write step.

+

Iceberg table writes can sometimes fail after upgrading to a new version as the partitioning of the table bubbles further up. Range based partitioning (used by default with sorted tables) can result in a small number of partitions when there is not much key distance.

+

One option is to, as with a manual sort in Spark, add some extra higher cardinality columns to your sort order in your iceberg table.

+

You can go back to pre-Spark 3 behaviour by instead inserting your own manual sort and setting the write mode to none.

+
+
+ + + + + + + + + + + + diff --git a/flowchart/error/index.html b/flowchart/error/index.html new file mode 100644 index 0000000..48ffda7 --- /dev/null +++ b/flowchart/error/index.html @@ -0,0 +1,368 @@ + + + + + + + + +Error - Spark Advanced Topics + + + + + + + + + +
+
+
+
+

Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load.

+
flowchart LR +Error[Error/Exception] + + +Error --> MemoryError[Memory Error] +Error --> ShuffleError[Shuffle Error] +Error --> SqlAnalysisError[sql.AnalysisException] +Error --> WriteFails[WriteFails] +Error --> OtherError[Others] + +Error --> Serialization +Serialization --> KyroBuffer[Kyro Buffer Overflow] + +KyroBuffer --> DriverMaxResultSize + + +MemoryError --> DriverMemory[Driver] +MemoryError --> ExecutorMemory[Executor] + +DriverMemory --> DriverMemoryError[Spark driver ran out of memory] +DriverMemory --> DriverMaxResultSize[MaxResultSize exceeded] +DriverMemory --> TooBigBroadcastJoin[Too Big Broadcast Join] +DriverMemory --> ContainerOOM[Container Out Of Memory] + +DriverMaxResultSize --> TooBigBroadcastJoin + +ExecutorMemory --> ExecutorMemoryError[Spark executor ran out of memory] +ExecutorMemory --> ExecutorDiskError[Executor out of disk error] +ExecutorMemory --> ContainerOOM +ExecutorMemory --> LARGERECORDS[Too large record / column+record] + +click Error "../../details/error-job" +click MemoryError "../../details/error-memory" + +click DriverMemory "../../details/error-memory/#driver" +click DriverMemoryError "../../details/error-driver-out-of-memory" +click DriverMaxResultSize "../../details/error-driver-max-result-size" + +click ExecutorMemory "../../details/error-memory/#executor" +click ExecutorMemoryError "../../details/error-executor-out-of-memory" +click ExecutorDiskError "../../details/error-executor-out-of-disk" + +click ShuffleError "../../details/error-shuffle" +click SqlAnalysisError "../../details/error-sql-analysis" +click OtherError "../../details/error-other" + +click ContainerOOM "../../details/container-oom" +click TooBigBroadcastJoin "../../details/big-broadcast-join" "Broadcast Joins" +click LARGERECORDS "../../details/failure-executor-large-record" + +click WriteFails "../../details/write-fails" + + + + + + +
+
+
+ + + + + + + + + diff --git a/flowchart/index.html b/flowchart/index.html new file mode 100644 index 0000000..fbe3369 --- /dev/null +++ b/flowchart/index.html @@ -0,0 +1,317 @@ + + + + + + + + +Index - Spark Advanced Topics + + + + + + + + + +
+
+
+
+

Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load.

+
graph TD + A[Start here] --> B[Slow Running Job] + C[I have an exception or error] + A --> C + click B "slow" "Slow" + click C "error" "Error" +
+
+
+ + + + + + + + + diff --git a/flowchart/shared/index.html b/flowchart/shared/index.html new file mode 100644 index 0000000..bd3b736 --- /dev/null +++ b/flowchart/shared/index.html @@ -0,0 +1,316 @@ + + + + + + + + +Shared - Spark Advanced Topics + + + + + + + + + +
+
+
+
+

Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load.

+
graph TD + + +OHNOES[Contact support] +
+
+
+
+ + + + + + + + + diff --git a/flowchart/slow/index.html b/flowchart/slow/index.html new file mode 100644 index 0000000..e8fc5b1 --- /dev/null +++ b/flowchart/slow/index.html @@ -0,0 +1,375 @@ + + + + + + + + +Slow - Spark Advanced Topics + + + + + + + + + +
+
+
+
+

Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load.

+
flowchart LR + +SlowJob[Slow Job] + +SlowJob --> SlowStage[Slow Stage] + +SlowStage --> SlowMap[Slow Read/Map] +SlowStage --> SlowReduce[Slow Shuffle/Reducer/Exchange] +SlowStage --> SLOWWRITESTOSTORAGE[Slow writes to storage] + +SlowJob --> TOOBIGDAG[Too Big DAG] +SlowJob --> SlowCluster[Slow Cluster] + +SlowReduce --> PAGGS[Partial aggregates] + +SlowReduce --> TooFewShuffleTasks[Not Enough Shuffle Tasks] +SlowReduce --> TooManyShuffleTasks[Too many shuffle tasks] +SlowReduce --> SkewedShuffleTasks[Skewed Shuffle Tasks] +SlowReduce --> SpillToDisk[Spill To Disk] +SkewedShuffleTasks --> SkewedJoin[Skewed Join] +SkewedShuffleTasks --> SkewedAggregation[Aggregation/Group By] + + +click SlowJob "../../details/slow-job" +click SlowStage "../../details/slow-stage" +click SlowMap "../../details/slow-map" +click SlowReduce "../../details/slow-reduce" +click SlowCluster "../../details/slow-job-slow-cluster" +click TOOBIGDAG "../../details/toobigdag" + +click TooFewShuffleTasks "../../details/slow-reduce/#not-enough-shuffle-tasks" +click TooManyShuffleTasks "../../details/slow-reduce/#too-many-shuffle-tasks" +click SkewedShuffleTasks "../../details/slow-reduce/#skewed-shuffle-tasks" +click SpillToDisk "../../details/slow-reduce/#spill-to-disk" + +click SkewedJoin "../../details/slow-skewed-join" +click SkewedAggregation "../../details/slow-reduce/#skewed-shuffle-tasks" + +SLOWWRITESTOSTORAGE[Slow writes to storage] +SLOWWRITESTOSTORAGE --> TOOMANYFILES[Slow writes because there are too many files] +SLOWWRITESTOSTORAGE --> SkewedWrite[Skewed Write: when to use Sort/Repartition/Coalesce before write] +SLOWWRITESTOSTORAGE --> S3COMMITTER[Slow writes on S3 depend on the committer] + +click UDFSLOWNESS "../../details/udfslow" + +click PAGGS "../../details/partial_aggregates" + +click FILTERNOTPUSHED "../../details/slow-partition_filter_pushdown" +click SLOWSTAGE "../../details/slow-stage" +click SLOWWRITESTOSTORAGE "../../details/slow-writes" +click SkewedWrite 
"../../details/slow-skewed-write" +click TOOMANYFILES "../../details/slow-writes-too-many-files" +click S3COMMITTER "../../details/slow-writes-s3" + +click TOOMANY "../../details/toomany_tasks" +click TOOFEW "../../details/toofew_tasks" +click NOTENOUGHEXEC "../../details/notenoughexecs" +click SHUFFLEPARTITIONISSUES "../../details/slow-reduce" +click READPARTITIONISSUES "../../details/read-partition-issue" + + + + + +
+
+
+ + + + + + + + + diff --git a/fonts/fontawesome-webfont.eot b/fonts/fontawesome-webfont.eot new file mode 100644 index 0000000..e9f60ca Binary files /dev/null and b/fonts/fontawesome-webfont.eot differ diff --git a/fonts/fontawesome-webfont.svg b/fonts/fontawesome-webfont.svg new file mode 100644 index 0000000..855c845 --- /dev/null +++ b/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/fonts/fontawesome-webfont.ttf 
b/fonts/fontawesome-webfont.ttf new file mode 100644 index 0000000..35acda2 Binary files /dev/null and b/fonts/fontawesome-webfont.ttf differ diff --git a/fonts/fontawesome-webfont.woff b/fonts/fontawesome-webfont.woff new file mode 100644 index 0000000..400014a Binary files /dev/null and b/fonts/fontawesome-webfont.woff differ diff --git a/fonts/fontawesome-webfont.woff2 b/fonts/fontawesome-webfont.woff2 new file mode 100644 index 0000000..4d13fc6 Binary files /dev/null and b/fonts/fontawesome-webfont.woff2 differ diff --git a/img/favicon.ico b/img/favicon.ico new file mode 100644 index 0000000..e85006a Binary files /dev/null and b/img/favicon.ico differ diff --git a/img/grid.png b/img/grid.png new file mode 100644 index 0000000..878c3ed Binary files /dev/null and b/img/grid.png differ diff --git a/imgs/identify-slow-stage.png b/imgs/identify-slow-stage.png new file mode 100644 index 0000000..811a461 Binary files /dev/null and b/imgs/identify-slow-stage.png differ diff --git a/imgs/spark-driver-max-result-size-error.png b/imgs/spark-driver-max-result-size-error.png new file mode 100644 index 0000000..76a0c80 Binary files /dev/null and b/imgs/spark-driver-max-result-size-error.png differ diff --git a/imgs/spark-filter-ignored.png b/imgs/spark-filter-ignored.png new file mode 100644 index 0000000..c4825ff Binary files /dev/null and b/imgs/spark-filter-ignored.png differ diff --git a/imgs/spark-filter-pushdown-success.png b/imgs/spark-filter-pushdown-success.png new file mode 100644 index 0000000..42f6036 Binary files /dev/null and b/imgs/spark-filter-pushdown-success.png differ diff --git a/imgs/spark-salted.png b/imgs/spark-salted.png new file mode 100644 index 0000000..0affdc3 Binary files /dev/null and b/imgs/spark-salted.png differ diff --git a/imgs/spark-skewed.png b/imgs/spark-skewed.png new file mode 100644 index 0000000..f78a2ac Binary files /dev/null and b/imgs/spark-skewed.png differ diff --git a/index.html b/index.html new file mode 100644 index 
0000000..2b5577c --- /dev/null +++ b/index.html @@ -0,0 +1,390 @@ + + + + + + + + + + + Spark Advanced Topics + + + + + + + + + + + +
+
+
+
+ +

Spark Advanced Topics Working Group Documentation

+

Welcome to the Spark Advanced Topics working group documentation. +This documentation is in the early stages. +We have been working on a flowchart to help you solve your current problems. +The documentation is collected under "details" (see above).

+

Other resources

+

Some other resources that may be useful include High Performance Spark by Holden Karau and Rachel Warren (note: some bias as a co-author), as well as the excellent on-line The Internals of Apache Spark and The Internals of Spark SQL by Jacek Laskowski.

+
+
+ + + + + + + + + + + + + + diff --git a/js/base.js b/js/base.js new file mode 100644 index 0000000..b0f4726 --- /dev/null +++ b/js/base.js @@ -0,0 +1,283 @@ +function getSearchTerm() { + var sPageURL = window.location.search.substring(1); + var sURLVariables = sPageURL.split('&'); + for (var i = 0; i < sURLVariables.length; i++) { + var sParameterName = sURLVariables[i].split('='); + if (sParameterName[0] == 'q') { + return sParameterName[1]; + } + } +} + +function applyTopPadding() { + // Update various absolute positions to match where the main container + // starts. This is necessary for handling multi-line nav headers, since + // that pushes the main container down. + var offset = $('body > .container').offset(); + $('html').css('scroll-padding-top', offset.top + 'px'); + $('.bs-sidebar.affix').css('top', offset.top + 'px'); +} + +$(document).ready(function() { + + applyTopPadding(); + + var search_term = getSearchTerm(), + $search_modal = $('#mkdocs_search_modal'), + $keyboard_modal = $('#mkdocs_keyboard_modal'); + + if (search_term) { + $search_modal.modal(); + } + + // make sure search input gets autofocus every time modal opens. 
+ $search_modal.on('shown.bs.modal', function() { + $search_modal.find('#mkdocs-search-query').focus(); + }); + + // Close search modal when result is selected + // The links get added later so listen to parent + $('#mkdocs-search-results').click(function(e) { + if ($(e.target).is('a')) { + $search_modal.modal('hide'); + } + }); + + // Populate keyboard modal with proper Keys + $keyboard_modal.find('.help.shortcut kbd')[0].innerHTML = keyCodes[shortcuts.help]; + $keyboard_modal.find('.prev.shortcut kbd')[0].innerHTML = keyCodes[shortcuts.previous]; + $keyboard_modal.find('.next.shortcut kbd')[0].innerHTML = keyCodes[shortcuts.next]; + $keyboard_modal.find('.search.shortcut kbd')[0].innerHTML = keyCodes[shortcuts.search]; + + // Keyboard navigation + document.addEventListener("keydown", function(e) { + if ($(e.target).is(':input')) return true; + var key = e.which || e.keyCode || window.event && window.event.keyCode; + var page; + switch (key) { + case shortcuts.next: + page = $('.navbar a[rel="next"]:first').prop('href'); + break; + case shortcuts.previous: + page = $('.navbar a[rel="prev"]:first').prop('href'); + break; + case shortcuts.search: + e.preventDefault(); + $keyboard_modal.modal('hide'); + $search_modal.modal('show'); + $search_modal.find('#mkdocs-search-query').focus(); + break; + case shortcuts.help: + $search_modal.modal('hide'); + $keyboard_modal.modal('show'); + break; + default: break; + } + if (page) { + $keyboard_modal.modal('hide'); + window.location.href = page; + } + }); + + $('table').addClass('table table-striped table-hover'); + + // Improve the scrollspy behaviour when users click on a TOC item. 
+ $(".bs-sidenav a").on("click", function() { + var clicked = this; + setTimeout(function() { + var active = $('.nav li.active a'); + active = active[active.length - 1]; + if (clicked !== active) { + $(active).parent().removeClass("active"); + $(clicked).parent().addClass("active"); + } + }, 50); + }); + + function showInnerDropdown(item) { + var popup = $(item).next('.dropdown-menu'); + popup.addClass('show'); + $(item).addClass('open'); + + // First, close any sibling dropdowns. + var container = $(item).parent().parent(); + container.find('> .dropdown-submenu > a').each(function(i, el) { + if (el !== item) { + hideInnerDropdown(el); + } + }); + + var popupMargin = 10; + var maxBottom = $(window).height() - popupMargin; + var bounds = item.getBoundingClientRect(); + + popup.css('left', bounds.right + 'px'); + if (bounds.top + popup.height() > maxBottom && + bounds.top > $(window).height() / 2) { + popup.css({ + 'top': (bounds.bottom - popup.height()) + 'px', + 'max-height': (bounds.bottom - popupMargin) + 'px', + }); + } else { + popup.css({ + 'top': bounds.top + 'px', + 'max-height': (maxBottom - bounds.top) + 'px', + }); + } + } + + function hideInnerDropdown(item) { + var popup = $(item).next('.dropdown-menu'); + popup.removeClass('show'); + $(item).removeClass('open'); + + popup.scrollTop(0); + popup.find('.dropdown-menu').scrollTop(0).removeClass('show'); + popup.find('.dropdown-submenu > a').removeClass('open'); + } + + $('.dropdown-submenu > a').on('click', function(e) { + if ($(this).next('.dropdown-menu').hasClass('show')) { + hideInnerDropdown(this); + } else { + showInnerDropdown(this); + } + + e.stopPropagation(); + e.preventDefault(); + }); + + $('.dropdown-menu').parent().on('hide.bs.dropdown', function(e) { + $(this).find('.dropdown-menu').scrollTop(0); + $(this).find('.dropdown-submenu > a').removeClass('open'); + $(this).find('.dropdown-menu .dropdown-menu').removeClass('show'); + }); +}); + +$(window).on('resize', applyTopPadding); + 
+$('body').scrollspy({ + target: '.bs-sidebar', + offset: 100 +}); + +/* Prevent disabled links from causing a page reload */ +$("li.disabled a").click(function() { + event.preventDefault(); +}); + +// See https://www.cambiaresearch.com/articles/15/javascript-char-codes-key-codes +// We only list common keys below. Obscure keys are omitted and their use is discouraged. +var keyCodes = { + 8: 'backspace', + 9: 'tab', + 13: 'enter', + 16: 'shift', + 17: 'ctrl', + 18: 'alt', + 19: 'pause/break', + 20: 'caps lock', + 27: 'escape', + 32: 'spacebar', + 33: 'page up', + 34: 'page down', + 35: 'end', + 36: 'home', + 37: '←', + 38: '↑', + 39: '→', + 40: '↓', + 45: 'insert', + 46: 'delete', + 48: '0', + 49: '1', + 50: '2', + 51: '3', + 52: '4', + 53: '5', + 54: '6', + 55: '7', + 56: '8', + 57: '9', + 65: 'a', + 66: 'b', + 67: 'c', + 68: 'd', + 69: 'e', + 70: 'f', + 71: 'g', + 72: 'h', + 73: 'i', + 74: 'j', + 75: 'k', + 76: 'l', + 77: 'm', + 78: 'n', + 79: 'o', + 80: 'p', + 81: 'q', + 82: 'r', + 83: 's', + 84: 't', + 85: 'u', + 86: 'v', + 87: 'w', + 88: 'x', + 89: 'y', + 90: 'z', + 91: 'Left Windows Key / Left ⌘', + 92: 'Right Windows Key', + 93: 'Windows Menu / Right ⌘', + 96: 'numpad 0', + 97: 'numpad 1', + 98: 'numpad 2', + 99: 'numpad 3', + 100: 'numpad 4', + 101: 'numpad 5', + 102: 'numpad 6', + 103: 'numpad 7', + 104: 'numpad 8', + 105: 'numpad 9', + 106: 'multiply', + 107: 'add', + 109: 'subtract', + 110: 'decimal point', + 111: 'divide', + 112: 'f1', + 113: 'f2', + 114: 'f3', + 115: 'f4', + 116: 'f5', + 117: 'f6', + 118: 'f7', + 119: 'f8', + 120: 'f9', + 121: 'f10', + 122: 'f11', + 123: 'f12', + 124: 'f13', + 125: 'f14', + 126: 'f15', + 127: 'f16', + 128: 'f17', + 129: 'f18', + 130: 'f19', + 131: 'f20', + 132: 'f21', + 133: 'f22', + 134: 'f23', + 135: 'f24', + 144: 'num lock', + 145: 'scroll lock', + 186: ';', + 187: '=', + 188: ',', + 189: '‐', + 190: '.', + 191: '?', + 192: '`', + 219: '[', + 220: '\', + 221: ']', + 222: ''', +}; diff --git a/js/bootstrap.min.js 
b/js/bootstrap.min.js new file mode 100644 index 0000000..ca013b7 --- /dev/null +++ b/js/bootstrap.min.js @@ -0,0 +1,7 @@ +/*! + * Bootstrap v4.3.1 (https://getbootstrap.com/) + * Copyright 2011-2019 The Bootstrap Authors (https://github.com/twbs/bootstrap/graphs/contributors) + * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE) + */ +!function(t,e){"object"==typeof exports&&"undefined"!=typeof module?e(exports,require("jquery"),require("popper.js")):"function"==typeof define&&define.amd?define(["exports","jquery","popper.js"],e):e((t=t||self).bootstrap={},t.jQuery,t.Popper)}(this,function(t,g,u){"use strict";function i(t,e){for(var n=0;nthis._items.length-1||t<0))if(this._isSliding)g(this._element).one(Q.SLID,function(){return e.to(t)});else{if(n===t)return this.pause(),void this.cycle();var i=ndocument.documentElement.clientHeight;!this._isBodyOverflowing&&t&&(this._element.style.paddingLeft=this._scrollbarWidth+"px"),this._isBodyOverflowing&&!t&&(this._element.style.paddingRight=this._scrollbarWidth+"px")},t._resetAdjustments=function(){this._element.style.paddingLeft="",this._element.style.paddingRight=""},t._checkScrollbar=function(){var t=document.body.getBoundingClientRect();this._isBodyOverflowing=t.left+t.right
',trigger:"hover focus",title:"",delay:0,html:!1,selector:!1,placement:"top",offset:0,container:!1,fallbackPlacement:"flip",boundary:"scrollParent",sanitize:!0,sanitizeFn:null,whiteList:Ee},je="show",He="out",Re={HIDE:"hide"+De,HIDDEN:"hidden"+De,SHOW:"show"+De,SHOWN:"shown"+De,INSERTED:"inserted"+De,CLICK:"click"+De,FOCUSIN:"focusin"+De,FOCUSOUT:"focusout"+De,MOUSEENTER:"mouseenter"+De,MOUSELEAVE:"mouseleave"+De},xe="fade",Fe="show",Ue=".tooltip-inner",We=".arrow",qe="hover",Me="focus",Ke="click",Qe="manual",Be=function(){function i(t,e){if("undefined"==typeof u)throw new TypeError("Bootstrap's tooltips require Popper.js (https://popper.js.org/)");this._isEnabled=!0,this._timeout=0,this._hoverState="",this._activeTrigger={},this._popper=null,this.element=t,this.config=this._getConfig(e),this.tip=null,this._setListeners()}var t=i.prototype;return t.enable=function(){this._isEnabled=!0},t.disable=function(){this._isEnabled=!1},t.toggleEnabled=function(){this._isEnabled=!this._isEnabled},t.toggle=function(t){if(this._isEnabled)if(t){var e=this.constructor.DATA_KEY,n=g(t.currentTarget).data(e);n||(n=new this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(e,n)),n._activeTrigger.click=!n._activeTrigger.click,n._isWithActiveTrigger()?n._enter(null,n):n._leave(null,n)}else{if(g(this.getTipElement()).hasClass(Fe))return void this._leave(null,this);this._enter(null,this)}},t.dispose=function(){clearTimeout(this._timeout),g.removeData(this.element,this.constructor.DATA_KEY),g(this.element).off(this.constructor.EVENT_KEY),g(this.element).closest(".modal").off("hide.bs.modal"),this.tip&&g(this.tip).remove(),this._isEnabled=null,this._timeout=null,this._hoverState=null,(this._activeTrigger=null)!==this._popper&&this._popper.destroy(),this._popper=null,this.element=null,this.config=null,this.tip=null},t.show=function(){var e=this;if("none"===g(this.element).css("display"))throw new Error("Please use show on visible elements");var 
t=g.Event(this.constructor.Event.SHOW);if(this.isWithContent()&&this._isEnabled){g(this.element).trigger(t);var n=_.findShadowRoot(this.element),i=g.contains(null!==n?n:this.element.ownerDocument.documentElement,this.element);if(t.isDefaultPrevented()||!i)return;var o=this.getTipElement(),r=_.getUID(this.constructor.NAME);o.setAttribute("id",r),this.element.setAttribute("aria-describedby",r),this.setContent(),this.config.animation&&g(o).addClass(xe);var s="function"==typeof this.config.placement?this.config.placement.call(this,o,this.element):this.config.placement,a=this._getAttachment(s);this.addAttachmentClass(a);var l=this._getContainer();g(o).data(this.constructor.DATA_KEY,this),g.contains(this.element.ownerDocument.documentElement,this.tip)||g(o).appendTo(l),g(this.element).trigger(this.constructor.Event.INSERTED),this._popper=new u(this.element,o,{placement:a,modifiers:{offset:this._getOffset(),flip:{behavior:this.config.fallbackPlacement},arrow:{element:We},preventOverflow:{boundariesElement:this.config.boundary}},onCreate:function(t){t.originalPlacement!==t.placement&&e._handlePopperPlacementChange(t)},onUpdate:function(t){return e._handlePopperPlacementChange(t)}}),g(o).addClass(Fe),"ontouchstart"in document.documentElement&&g(document.body).children().on("mouseover",null,g.noop);var c=function(){e.config.animation&&e._fixTransition();var t=e._hoverState;e._hoverState=null,g(e.element).trigger(e.constructor.Event.SHOWN),t===He&&e._leave(null,e)};if(g(this.tip).hasClass(xe)){var h=_.getTransitionDurationFromElement(this.tip);g(this.tip).one(_.TRANSITION_END,c).emulateTransitionEnd(h)}else c()}},t.hide=function(t){var 
e=this,n=this.getTipElement(),i=g.Event(this.constructor.Event.HIDE),o=function(){e._hoverState!==je&&n.parentNode&&n.parentNode.removeChild(n),e._cleanTipClass(),e.element.removeAttribute("aria-describedby"),g(e.element).trigger(e.constructor.Event.HIDDEN),null!==e._popper&&e._popper.destroy(),t&&t()};if(g(this.element).trigger(i),!i.isDefaultPrevented()){if(g(n).removeClass(Fe),"ontouchstart"in document.documentElement&&g(document.body).children().off("mouseover",null,g.noop),this._activeTrigger[Ke]=!1,this._activeTrigger[Me]=!1,this._activeTrigger[qe]=!1,g(this.tip).hasClass(xe)){var r=_.getTransitionDurationFromElement(n);g(n).one(_.TRANSITION_END,o).emulateTransitionEnd(r)}else o();this._hoverState=""}},t.update=function(){null!==this._popper&&this._popper.scheduleUpdate()},t.isWithContent=function(){return Boolean(this.getTitle())},t.addAttachmentClass=function(t){g(this.getTipElement()).addClass(Ae+"-"+t)},t.getTipElement=function(){return this.tip=this.tip||g(this.config.template)[0],this.tip},t.setContent=function(){var t=this.getTipElement();this.setElementContent(g(t.querySelectorAll(Ue)),this.getTitle()),g(t).removeClass(xe+" "+Fe)},t.setElementContent=function(t,e){"object"!=typeof e||!e.nodeType&&!e.jquery?this.config.html?(this.config.sanitize&&(e=Se(e,this.config.whiteList,this.config.sanitizeFn)),t.html(e)):t.text(e):this.config.html?g(e).parent().is(t)||t.empty().append(e):t.text(g(e).text())},t.getTitle=function(){var t=this.element.getAttribute("data-original-title");return t||(t="function"==typeof this.config.title?this.config.title.call(this.element):this.config.title),t},t._getOffset=function(){var e=this,t={};return"function"==typeof this.config.offset?t.fn=function(t){return 
t.offsets=l({},t.offsets,e.config.offset(t.offsets,e.element)||{}),t}:t.offset=this.config.offset,t},t._getContainer=function(){return!1===this.config.container?document.body:_.isElement(this.config.container)?g(this.config.container):g(document).find(this.config.container)},t._getAttachment=function(t){return Pe[t.toUpperCase()]},t._setListeners=function(){var i=this;this.config.trigger.split(" ").forEach(function(t){if("click"===t)g(i.element).on(i.constructor.Event.CLICK,i.config.selector,function(t){return i.toggle(t)});else if(t!==Qe){var e=t===qe?i.constructor.Event.MOUSEENTER:i.constructor.Event.FOCUSIN,n=t===qe?i.constructor.Event.MOUSELEAVE:i.constructor.Event.FOCUSOUT;g(i.element).on(e,i.config.selector,function(t){return i._enter(t)}).on(n,i.config.selector,function(t){return i._leave(t)})}}),g(this.element).closest(".modal").on("hide.bs.modal",function(){i.element&&i.hide()}),this.config.selector?this.config=l({},this.config,{trigger:"manual",selector:""}):this._fixTitle()},t._fixTitle=function(){var t=typeof this.element.getAttribute("data-original-title");(this.element.getAttribute("title")||"string"!==t)&&(this.element.setAttribute("data-original-title",this.element.getAttribute("title")||""),this.element.setAttribute("title",""))},t._enter=function(t,e){var n=this.constructor.DATA_KEY;(e=e||g(t.currentTarget).data(n))||(e=new this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusin"===t.type?Me:qe]=!0),g(e.getTipElement()).hasClass(Fe)||e._hoverState===je?e._hoverState=je:(clearTimeout(e._timeout),e._hoverState=je,e.config.delay&&e.config.delay.show?e._timeout=setTimeout(function(){e._hoverState===je&&e.show()},e.config.delay.show):e.show())},t._leave=function(t,e){var n=this.constructor.DATA_KEY;(e=e||g(t.currentTarget).data(n))||(e=new 
this.constructor(t.currentTarget,this._getDelegateConfig()),g(t.currentTarget).data(n,e)),t&&(e._activeTrigger["focusout"===t.type?Me:qe]=!1),e._isWithActiveTrigger()||(clearTimeout(e._timeout),e._hoverState=He,e.config.delay&&e.config.delay.hide?e._timeout=setTimeout(function(){e._hoverState===He&&e.hide()},e.config.delay.hide):e.hide())},t._isWithActiveTrigger=function(){for(var t in this._activeTrigger)if(this._activeTrigger[t])return!0;return!1},t._getConfig=function(t){var e=g(this.element).data();return Object.keys(e).forEach(function(t){-1!==Oe.indexOf(t)&&delete e[t]}),"number"==typeof(t=l({},this.constructor.Default,e,"object"==typeof t&&t?t:{})).delay&&(t.delay={show:t.delay,hide:t.delay}),"number"==typeof t.title&&(t.title=t.title.toString()),"number"==typeof t.content&&(t.content=t.content.toString()),_.typeCheckConfig(be,t,this.constructor.DefaultType),t.sanitize&&(t.template=Se(t.template,t.whiteList,t.sanitizeFn)),t},t._getDelegateConfig=function(){var t={};if(this.config)for(var e in this.config)this.constructor.Default[e]!==this.config[e]&&(t[e]=this.config[e]);return t},t._cleanTipClass=function(){var t=g(this.getTipElement()),e=t.attr("class").match(Ne);null!==e&&e.length&&t.removeClass(e.join(""))},t._handlePopperPlacementChange=function(t){var e=t.instance;this.tip=e.popper,this._cleanTipClass(),this.addAttachmentClass(this._getAttachment(t.placement))},t._fixTransition=function(){var t=this.getTipElement(),e=this.config.animation;null===t.getAttribute("x-placement")&&(g(t).removeClass(xe),this.config.animation=!1,this.hide(),this.show(),this.config.animation=e)},i._jQueryInterface=function(n){return this.each(function(){var t=g(this).data(Ie),e="object"==typeof n&&n;if((t||!/dispose|hide/.test(n))&&(t||(t=new i(this,e),g(this).data(Ie,t)),"string"==typeof n)){if("undefined"==typeof t[n])throw new TypeError('No method named "'+n+'"');t[n]()}})},s(i,null,[{key:"VERSION",get:function(){return"4.3.1"}},{key:"Default",get:function(){return 
Le}},{key:"NAME",get:function(){return be}},{key:"DATA_KEY",get:function(){return Ie}},{key:"Event",get:function(){return Re}},{key:"EVENT_KEY",get:function(){return De}},{key:"DefaultType",get:function(){return ke}}]),i}();g.fn[be]=Be._jQueryInterface,g.fn[be].Constructor=Be,g.fn[be].noConflict=function(){return g.fn[be]=we,Be._jQueryInterface};var Ve="popover",Ye="bs.popover",ze="."+Ye,Xe=g.fn[Ve],$e="bs-popover",Ge=new RegExp("(^|\\s)"+$e+"\\S+","g"),Je=l({},Be.Default,{placement:"right",trigger:"click",content:"",template:''}),Ze=l({},Be.DefaultType,{content:"(string|element|function)"}),tn="fade",en="show",nn=".popover-header",on=".popover-body",rn={HIDE:"hide"+ze,HIDDEN:"hidden"+ze,SHOW:"show"+ze,SHOWN:"shown"+ze,INSERTED:"inserted"+ze,CLICK:"click"+ze,FOCUSIN:"focusin"+ze,FOCUSOUT:"focusout"+ze,MOUSEENTER:"mouseenter"+ze,MOUSELEAVE:"mouseleave"+ze},sn=function(t){var e,n;function i(){return t.apply(this,arguments)||this}n=t,(e=i).prototype=Object.create(n.prototype),(e.prototype.constructor=e).__proto__=n;var o=i.prototype;return o.isWithContent=function(){return this.getTitle()||this._getContent()},o.addAttachmentClass=function(t){g(this.getTipElement()).addClass($e+"-"+t)},o.getTipElement=function(){return this.tip=this.tip||g(this.config.template)[0],this.tip},o.setContent=function(){var t=g(this.getTipElement());this.setElementContent(t.find(nn),this.getTitle());var e=this._getContent();"function"==typeof e&&(e=e.call(this.element)),this.setElementContent(t.find(on),e),t.removeClass(tn+" "+en)},o._getContent=function(){return this.element.getAttribute("data-content")||this.config.content},o._cleanTipClass=function(){var t=g(this.getTipElement()),e=t.attr("class").match(Ge);null!==e&&0=this._offsets[o]&&("undefined"==typeof this._offsets[o+1]||t+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new 
RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" 
"]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return 
r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return 
l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return 
e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0 0) { + var tokenMetadata = lunr.utils.clone(metadata) || {} + tokenMetadata["position"] = [sliceStart, sliceLength] + tokenMetadata["index"] = tokens.length + + tokens.push( + new lunr.Token ( + str.slice(sliceStart, sliceEnd), + tokenMetadata + ) + ) + } + + sliceStart = sliceEnd + 1 + } + + } + + return tokens +} + +/** + * The separator used to split a string into tokens. Override this property to change the behaviour of + * `lunr.tokenizer` behaviour when tokenizing strings. By default this splits on whitespace and hyphens. + * + * @static + * @see lunr.tokenizer + */ +lunr.tokenizer.separator = /[\s\-]+/ +/*! + * lunr.Pipeline + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Pipelines maintain an ordered list of functions to be applied to all + * tokens in documents entering the search index and queries being ran against + * the index. + * + * An instance of lunr.Index created with the lunr shortcut will contain a + * pipeline with a stop word filter and an English language stemmer. Extra + * functions can be added before or after either of these functions or these + * default functions can be removed. + * + * When run the pipeline will call each function in turn, passing a token, the + * index of that token in the original list of all tokens and finally a list of + * all the original tokens. + * + * The output of functions in the pipeline will be passed to the next function + * in the pipeline. To exclude a token from entering the index the function + * should return undefined, the rest of the pipeline will not be called with + * this token. 
+ * + * For serialisation of pipelines to work, all functions used in an instance of + * a pipeline should be registered with lunr.Pipeline. Registered functions can + * then be loaded. If trying to load a serialised pipeline that uses functions + * that are not registered an error will be thrown. + * + * If not planning on serialising the pipeline then registering pipeline functions + * is not necessary. + * + * @constructor + */ +lunr.Pipeline = function () { + this._stack = [] +} + +lunr.Pipeline.registeredFunctions = Object.create(null) + +/** + * A pipeline function maps lunr.Token to lunr.Token. A lunr.Token contains the token + * string as well as all known metadata. A pipeline function can mutate the token string + * or mutate (or add) metadata for a given token. + * + * A pipeline function can indicate that the passed token should be discarded by returning + * null, undefined or an empty string. This token will not be passed to any downstream pipeline + * functions and will not be added to the index. + * + * Multiple tokens can be returned by returning an array of tokens. Each token will be passed + * to any downstream pipeline functions and all will returned tokens will be added to the index. + * + * Any number of pipeline functions may be chained together using a lunr.Pipeline. + * + * @interface lunr.PipelineFunction + * @param {lunr.Token} token - A token from the document being processed. + * @param {number} i - The index of this token in the complete list of tokens for this document/field. + * @param {lunr.Token[]} tokens - All tokens for this document/field. + * @returns {(?lunr.Token|lunr.Token[])} + */ + +/** + * Register a function with the pipeline. + * + * Functions that are used in the pipeline should be registered if the pipeline + * needs to be serialised, or a serialised pipeline needs to be loaded. 
+ * + * Registering a function does not add it to a pipeline, functions must still be + * added to instances of the pipeline for them to be used when running a pipeline. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @param {String} label - The label to register this function with + */ +lunr.Pipeline.registerFunction = function (fn, label) { + if (label in this.registeredFunctions) { + lunr.utils.warn('Overwriting existing registered function: ' + label) + } + + fn.label = label + lunr.Pipeline.registeredFunctions[fn.label] = fn +} + +/** + * Warns if the function is not registered as a Pipeline function. + * + * @param {lunr.PipelineFunction} fn - The function to check for. + * @private + */ +lunr.Pipeline.warnIfFunctionNotRegistered = function (fn) { + var isRegistered = fn.label && (fn.label in this.registeredFunctions) + + if (!isRegistered) { + lunr.utils.warn('Function is not registered with pipeline. This may cause problems when serialising the index.\n', fn) + } +} + +/** + * Loads a previously serialised pipeline. + * + * All functions to be loaded must already be registered with lunr.Pipeline. + * If any function from the serialised data has not been registered then an + * error will be thrown. + * + * @param {Object} serialised - The serialised pipeline to load. + * @returns {lunr.Pipeline} + */ +lunr.Pipeline.load = function (serialised) { + var pipeline = new lunr.Pipeline + + serialised.forEach(function (fnName) { + var fn = lunr.Pipeline.registeredFunctions[fnName] + + if (fn) { + pipeline.add(fn) + } else { + throw new Error('Cannot load unregistered function: ' + fnName) + } + }) + + return pipeline +} + +/** + * Adds new functions to the end of the pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction[]} functions - Any number of functions to add to the pipeline. 
+ */ +lunr.Pipeline.prototype.add = function () { + var fns = Array.prototype.slice.call(arguments) + + fns.forEach(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + this._stack.push(fn) + }, this) +} + +/** + * Adds a single function after a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.after = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + pos = pos + 1 + this._stack.splice(pos, 0, newFn) +} + +/** + * Adds a single function before a function that already exists in the + * pipeline. + * + * Logs a warning if the function has not been registered. + * + * @param {lunr.PipelineFunction} existingFn - A function that already exists in the pipeline. + * @param {lunr.PipelineFunction} newFn - The new function to add to the pipeline. + */ +lunr.Pipeline.prototype.before = function (existingFn, newFn) { + lunr.Pipeline.warnIfFunctionNotRegistered(newFn) + + var pos = this._stack.indexOf(existingFn) + if (pos == -1) { + throw new Error('Cannot find existingFn') + } + + this._stack.splice(pos, 0, newFn) +} + +/** + * Removes a function from the pipeline. + * + * @param {lunr.PipelineFunction} fn The function to remove from the pipeline. + */ +lunr.Pipeline.prototype.remove = function (fn) { + var pos = this._stack.indexOf(fn) + if (pos == -1) { + return + } + + this._stack.splice(pos, 1) +} + +/** + * Runs the current list of functions that make up the pipeline against the + * passed tokens. + * + * @param {Array} tokens The tokens to run through the pipeline. 
+ * @returns {Array} + */ +lunr.Pipeline.prototype.run = function (tokens) { + var stackLength = this._stack.length + + for (var i = 0; i < stackLength; i++) { + var fn = this._stack[i] + var memo = [] + + for (var j = 0; j < tokens.length; j++) { + var result = fn(tokens[j], j, tokens) + + if (result === null || result === void 0 || result === '') continue + + if (Array.isArray(result)) { + for (var k = 0; k < result.length; k++) { + memo.push(result[k]) + } + } else { + memo.push(result) + } + } + + tokens = memo + } + + return tokens +} + +/** + * Convenience method for passing a string through a pipeline and getting + * strings out. This method takes care of wrapping the passed string in a + * token and mapping the resulting tokens back to strings. + * + * @param {string} str - The string to pass through the pipeline. + * @param {?object} metadata - Optional metadata to associate with the token + * passed to the pipeline. + * @returns {string[]} + */ +lunr.Pipeline.prototype.runString = function (str, metadata) { + var token = new lunr.Token (str, metadata) + + return this.run([token]).map(function (t) { + return t.toString() + }) +} + +/** + * Resets the pipeline by removing any existing processors. + * + */ +lunr.Pipeline.prototype.reset = function () { + this._stack = [] +} + +/** + * Returns a representation of the pipeline ready for serialisation. + * + * Logs a warning if the function has not been registered. + * + * @returns {Array} + */ +lunr.Pipeline.prototype.toJSON = function () { + return this._stack.map(function (fn) { + lunr.Pipeline.warnIfFunctionNotRegistered(fn) + + return fn.label + }) +} +/*! + * lunr.Vector + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A vector is used to construct the vector space of documents and queries. These + * vectors support operations to determine the similarity between two documents or + * a document and a query. 
+ * + * Normally no parameters are required for initializing a vector, but in the case of + * loading a previously dumped vector the raw elements can be provided to the constructor. + * + * For performance reasons vectors are implemented with a flat array, where an elements + * index is immediately followed by its value. E.g. [index, value, index, value]. This + * allows the underlying array to be as sparse as possible and still offer decent + * performance when being used for vector calculations. + * + * @constructor + * @param {Number[]} [elements] - The flat list of element index and element value pairs. + */ +lunr.Vector = function (elements) { + this._magnitude = 0 + this.elements = elements || [] +} + + +/** + * Calculates the position within the vector to insert a given index. + * + * This is used internally by insert and upsert. If there are duplicate indexes then + * the position is returned as if the value for that index were to be updated, but it + * is the callers responsibility to check whether there is a duplicate at that index + * + * @param {Number} insertIdx - The index at which the element should be inserted. 
+ * @returns {Number} + */ +lunr.Vector.prototype.positionForIndex = function (index) { + // For an empty vector the tuple can be inserted at the beginning + if (this.elements.length == 0) { + return 0 + } + + var start = 0, + end = this.elements.length / 2, + sliceLength = end - start, + pivotPoint = Math.floor(sliceLength / 2), + pivotIndex = this.elements[pivotPoint * 2] + + while (sliceLength > 1) { + if (pivotIndex < index) { + start = pivotPoint + } + + if (pivotIndex > index) { + end = pivotPoint + } + + if (pivotIndex == index) { + break + } + + sliceLength = end - start + pivotPoint = start + Math.floor(sliceLength / 2) + pivotIndex = this.elements[pivotPoint * 2] + } + + if (pivotIndex == index) { + return pivotPoint * 2 + } + + if (pivotIndex > index) { + return pivotPoint * 2 + } + + if (pivotIndex < index) { + return (pivotPoint + 1) * 2 + } +} + +/** + * Inserts an element at an index within the vector. + * + * Does not allow duplicates, will throw an error if there is already an entry + * for this index. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. + */ +lunr.Vector.prototype.insert = function (insertIdx, val) { + this.upsert(insertIdx, val, function () { + throw "duplicate index" + }) +} + +/** + * Inserts or updates an existing index within the vector. + * + * @param {Number} insertIdx - The index at which the element should be inserted. + * @param {Number} val - The value to be inserted into the vector. 
+ * @param {function} fn - A function that is called for updates, the existing value and the + * requested value are passed as arguments + */ +lunr.Vector.prototype.upsert = function (insertIdx, val, fn) { + this._magnitude = 0 + var position = this.positionForIndex(insertIdx) + + if (this.elements[position] == insertIdx) { + this.elements[position + 1] = fn(this.elements[position + 1], val) + } else { + this.elements.splice(position, 0, insertIdx, val) + } +} + +/** + * Calculates the magnitude of this vector. + * + * @returns {Number} + */ +lunr.Vector.prototype.magnitude = function () { + if (this._magnitude) return this._magnitude + + var sumOfSquares = 0, + elementsLength = this.elements.length + + for (var i = 1; i < elementsLength; i += 2) { + var val = this.elements[i] + sumOfSquares += val * val + } + + return this._magnitude = Math.sqrt(sumOfSquares) +} + +/** + * Calculates the dot product of this vector and another vector. + * + * @param {lunr.Vector} otherVector - The vector to compute the dot product with. + * @returns {Number} + */ +lunr.Vector.prototype.dot = function (otherVector) { + var dotProduct = 0, + a = this.elements, b = otherVector.elements, + aLen = a.length, bLen = b.length, + aVal = 0, bVal = 0, + i = 0, j = 0 + + while (i < aLen && j < bLen) { + aVal = a[i], bVal = b[j] + if (aVal < bVal) { + i += 2 + } else if (aVal > bVal) { + j += 2 + } else if (aVal == bVal) { + dotProduct += a[i + 1] * b[j + 1] + i += 2 + j += 2 + } + } + + return dotProduct +} + +/** + * Calculates the similarity between this vector and another vector. + * + * @param {lunr.Vector} otherVector - The other vector to calculate the + * similarity with. + * @returns {Number} + */ +lunr.Vector.prototype.similarity = function (otherVector) { + return this.dot(otherVector) / this.magnitude() || 0 +} + +/** + * Converts the vector to an array of the elements within the vector. 
+ * + * @returns {Number[]} + */ +lunr.Vector.prototype.toArray = function () { + var output = new Array (this.elements.length / 2) + + for (var i = 1, j = 0; i < this.elements.length; i += 2, j++) { + output[j] = this.elements[i] + } + + return output +} + +/** + * A JSON serializable representation of the vector. + * + * @returns {Number[]} + */ +lunr.Vector.prototype.toJSON = function () { + return this.elements +} +/* eslint-disable */ +/*! + * lunr.stemmer + * Copyright (C) 2020 Oliver Nightingale + * Includes code from - http://tartarus.org/~martin/PorterStemmer/js.txt + */ + +/** + * lunr.stemmer is an english language stemmer, this is a JavaScript + * implementation of the PorterStemmer taken from http://tartarus.org/~martin + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token - The string to stem + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + * @function + */ +lunr.stemmer = (function(){ + var step2list = { + "ational" : "ate", + "tional" : "tion", + "enci" : "ence", + "anci" : "ance", + "izer" : "ize", + "bli" : "ble", + "alli" : "al", + "entli" : "ent", + "eli" : "e", + "ousli" : "ous", + "ization" : "ize", + "ation" : "ate", + "ator" : "ate", + "alism" : "al", + "iveness" : "ive", + "fulness" : "ful", + "ousness" : "ous", + "aliti" : "al", + "iviti" : "ive", + "biliti" : "ble", + "logi" : "log" + }, + + step3list = { + "icate" : "ic", + "ative" : "", + "alize" : "al", + "iciti" : "ic", + "ical" : "ic", + "ful" : "", + "ness" : "" + }, + + c = "[^aeiou]", // consonant + v = "[aeiouy]", // vowel + C = c + "[^aeiouy]*", // consonant sequence + V = v + "[aeiou]*", // vowel sequence + + mgr0 = "^(" + C + ")?" + V + C, // [C]VC... is m>0 + meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$", // [C]VC[V] is m=1 + mgr1 = "^(" + C + ")?" + V + C + V + C, // [C]VCVC... is m>1 + s_v = "^(" + C + ")?" 
+ v; // vowel in stem + + var re_mgr0 = new RegExp(mgr0); + var re_mgr1 = new RegExp(mgr1); + var re_meq1 = new RegExp(meq1); + var re_s_v = new RegExp(s_v); + + var re_1a = /^(.+?)(ss|i)es$/; + var re2_1a = /^(.+?)([^s])s$/; + var re_1b = /^(.+?)eed$/; + var re2_1b = /^(.+?)(ed|ing)$/; + var re_1b_2 = /.$/; + var re2_1b_2 = /(at|bl|iz)$/; + var re3_1b_2 = new RegExp("([^aeiouylsz])\\1$"); + var re4_1b_2 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var re_1c = /^(.+?[^aeiou])y$/; + var re_2 = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + + var re_3 = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + + var re_4 = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + var re2_4 = /^(.+?)(s|t)(ion)$/; + + var re_5 = /^(.+?)e$/; + var re_5_1 = /ll$/; + var re3_5 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + + var porterStemmer = function porterStemmer(w) { + var stem, + suffix, + firstch, + re, + re2, + re3, + re4; + + if (w.length < 3) { return w; } + + firstch = w.substr(0,1); + if (firstch == "y") { + w = firstch.toUpperCase() + w.substr(1); + } + + // Step 1a + re = re_1a + re2 = re2_1a; + + if (re.test(w)) { w = w.replace(re,"$1$2"); } + else if (re2.test(w)) { w = w.replace(re2,"$1$2"); } + + // Step 1b + re = re_1b; + re2 = re2_1b; + if (re.test(w)) { + var fp = re.exec(w); + re = re_mgr0; + if (re.test(fp[1])) { + re = re_1b_2; + w = w.replace(re,""); + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = re_s_v; + if (re2.test(stem)) { + w = stem; + re2 = re2_1b_2; + re3 = re3_1b_2; + re4 = re4_1b_2; + if (re2.test(w)) { w = w + "e"; } + else if (re3.test(w)) { re = re_1b_2; w = w.replace(re,""); } + else if (re4.test(w)) { w = w + "e"; } + } + } + + // Step 1c - replace suffix y or Y by i if preceded by a non-vowel which is not the first letter of the word (so cry -> cri, by -> by, say -> say) + re = 
re_1c; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + w = stem + "i"; + } + + // Step 2 + re = re_2; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step2list[suffix]; + } + } + + // Step 3 + re = re_3; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = re_mgr0; + if (re.test(stem)) { + w = stem + step3list[suffix]; + } + } + + // Step 4 + re = re_4; + re2 = re2_4; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + if (re.test(stem)) { + w = stem; + } + } else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = re_mgr1; + if (re2.test(stem)) { + w = stem; + } + } + + // Step 5 + re = re_5; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = re_mgr1; + re2 = re_meq1; + re3 = re3_5; + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) { + w = stem; + } + } + + re = re_5_1; + re2 = re_mgr1; + if (re.test(w) && re2.test(w)) { + re = re_1b_2; + w = w.replace(re,""); + } + + // and turn initial Y back to y + + if (firstch == "y") { + w = firstch.toLowerCase() + w.substr(1); + } + + return w; + }; + + return function (token) { + return token.update(porterStemmer); + } +})(); + +lunr.Pipeline.registerFunction(lunr.stemmer, 'stemmer') +/*! + * lunr.stopWordFilter + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.generateStopWordFilter builds a stopWordFilter function from the provided + * list of stop words. + * + * The built in lunr.stopWordFilter is built using this generator and can be used + * to generate custom stopWordFilters for applications or non English languages. 
+ * + * @function + * @param {Array} token The token to pass through the filter + * @returns {lunr.PipelineFunction} + * @see lunr.Pipeline + * @see lunr.stopWordFilter + */ +lunr.generateStopWordFilter = function (stopWords) { + var words = stopWords.reduce(function (memo, stopWord) { + memo[stopWord] = stopWord + return memo + }, {}) + + return function (token) { + if (token && words[token.toString()] !== token.toString()) return token + } +} + +/** + * lunr.stopWordFilter is an English language stop word list filter, any words + * contained in the list will not be passed through the filter. + * + * This is intended to be used in the Pipeline. If the token does not pass the + * filter then undefined will be returned. + * + * @function + * @implements {lunr.PipelineFunction} + * @params {lunr.Token} token - A token to check for being a stop word. + * @returns {lunr.Token} + * @see {@link lunr.Pipeline} + */ +lunr.stopWordFilter = lunr.generateStopWordFilter([ + 'a', + 'able', + 'about', + 'across', + 'after', + 'all', + 'almost', + 'also', + 'am', + 'among', + 'an', + 'and', + 'any', + 'are', + 'as', + 'at', + 'be', + 'because', + 'been', + 'but', + 'by', + 'can', + 'cannot', + 'could', + 'dear', + 'did', + 'do', + 'does', + 'either', + 'else', + 'ever', + 'every', + 'for', + 'from', + 'get', + 'got', + 'had', + 'has', + 'have', + 'he', + 'her', + 'hers', + 'him', + 'his', + 'how', + 'however', + 'i', + 'if', + 'in', + 'into', + 'is', + 'it', + 'its', + 'just', + 'least', + 'let', + 'like', + 'likely', + 'may', + 'me', + 'might', + 'most', + 'must', + 'my', + 'neither', + 'no', + 'nor', + 'not', + 'of', + 'off', + 'often', + 'on', + 'only', + 'or', + 'other', + 'our', + 'own', + 'rather', + 'said', + 'say', + 'says', + 'she', + 'should', + 'since', + 'so', + 'some', + 'than', + 'that', + 'the', + 'their', + 'them', + 'then', + 'there', + 'these', + 'they', + 'this', + 'tis', + 'to', + 'too', + 'twas', + 'us', + 'wants', + 'was', + 'we', + 'were', + 'what', + 
'when', + 'where', + 'which', + 'while', + 'who', + 'whom', + 'why', + 'will', + 'with', + 'would', + 'yet', + 'you', + 'your' +]) + +lunr.Pipeline.registerFunction(lunr.stopWordFilter, 'stopWordFilter') +/*! + * lunr.trimmer + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.trimmer is a pipeline function for trimming non word + * characters from the beginning and end of tokens before they + * enter the index. + * + * This implementation may not work correctly for non latin + * characters and should either be removed or adapted for use + * with languages with non-latin characters. + * + * @static + * @implements {lunr.PipelineFunction} + * @param {lunr.Token} token The token to pass through the filter + * @returns {lunr.Token} + * @see lunr.Pipeline + */ +lunr.trimmer = function (token) { + return token.update(function (s) { + return s.replace(/^\W+/, '').replace(/\W+$/, '') + }) +} + +lunr.Pipeline.registerFunction(lunr.trimmer, 'trimmer') +/*! + * lunr.TokenSet + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * A token set is used to store the unique list of all tokens + * within an index. Token sets are also used to represent an + * incoming query to the index, this query token set and index + * token set are then intersected to find which tokens to look + * up in the inverted index. + * + * A token set can hold multiple tokens, as in the case of the + * index token set, or it can hold a single token as in the + * case of a simple query token set. + * + * Additionally token sets are used to perform wildcard matching. + * Leading, contained and trailing wildcards are supported, and + * from this edit distance matching can also be provided. + * + * Token sets are implemented as a minimal finite state automata, + * where both common prefixes and suffixes are shared between tokens. + * This helps to reduce the space used for storing the token set. 
+ * + * @constructor + */ +lunr.TokenSet = function () { + this.final = false + this.edges = {} + this.id = lunr.TokenSet._nextId + lunr.TokenSet._nextId += 1 +} + +/** + * Keeps track of the next, auto increment, identifier to assign + * to a new tokenSet. + * + * TokenSets require a unique identifier to be correctly minimised. + * + * @private + */ +lunr.TokenSet._nextId = 1 + +/** + * Creates a TokenSet instance from the given sorted array of words. + * + * @param {String[]} arr - A sorted array of strings to create the set from. + * @returns {lunr.TokenSet} + * @throws Will throw an error if the input array is not sorted. + */ +lunr.TokenSet.fromArray = function (arr) { + var builder = new lunr.TokenSet.Builder + + for (var i = 0, len = arr.length; i < len; i++) { + builder.insert(arr[i]) + } + + builder.finish() + return builder.root +} + +/** + * Creates a token set from a query clause. + * + * @private + * @param {Object} clause - A single clause from lunr.Query. + * @param {string} clause.term - The query clause term. + * @param {number} [clause.editDistance] - The optional edit distance for the term. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromClause = function (clause) { + if ('editDistance' in clause) { + return lunr.TokenSet.fromFuzzyString(clause.term, clause.editDistance) + } else { + return lunr.TokenSet.fromString(clause.term) + } +} + +/** + * Creates a token set representing a single string with a specified + * edit distance. + * + * Insertions, deletions, substitutions and transpositions are each + * treated as an edit distance of 1. + * + * Increasing the allowed edit distance will have a dramatic impact + * on the performance of both creating and intersecting these TokenSets. + * It is advised to keep the edit distance less than 3. + * + * @param {string} str - The string to create the token set from. + * @param {number} editDistance - The allowed edit distance to match. 
+ * @returns {lunr.Vector} + */ +lunr.TokenSet.fromFuzzyString = function (str, editDistance) { + var root = new lunr.TokenSet + + var stack = [{ + node: root, + editsRemaining: editDistance, + str: str + }] + + while (stack.length) { + var frame = stack.pop() + + // no edit + if (frame.str.length > 0) { + var char = frame.str.charAt(0), + noEditNode + + if (char in frame.node.edges) { + noEditNode = frame.node.edges[char] + } else { + noEditNode = new lunr.TokenSet + frame.node.edges[char] = noEditNode + } + + if (frame.str.length == 1) { + noEditNode.final = true + } + + stack.push({ + node: noEditNode, + editsRemaining: frame.editsRemaining, + str: frame.str.slice(1) + }) + } + + if (frame.editsRemaining == 0) { + continue + } + + // insertion + if ("*" in frame.node.edges) { + var insertionNode = frame.node.edges["*"] + } else { + var insertionNode = new lunr.TokenSet + frame.node.edges["*"] = insertionNode + } + + if (frame.str.length == 0) { + insertionNode.final = true + } + + stack.push({ + node: insertionNode, + editsRemaining: frame.editsRemaining - 1, + str: frame.str + }) + + // deletion + // can only do a deletion if we have enough edits remaining + // and if there are characters left to delete in the string + if (frame.str.length > 1) { + stack.push({ + node: frame.node, + editsRemaining: frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // deletion + // just removing the last character from the str + if (frame.str.length == 1) { + frame.node.final = true + } + + // substitution + // can only do a substitution if we have enough edits remaining + // and if there are characters left to substitute + if (frame.str.length >= 1) { + if ("*" in frame.node.edges) { + var substitutionNode = frame.node.edges["*"] + } else { + var substitutionNode = new lunr.TokenSet + frame.node.edges["*"] = substitutionNode + } + + if (frame.str.length == 1) { + substitutionNode.final = true + } + + stack.push({ + node: substitutionNode, + editsRemaining: 
frame.editsRemaining - 1, + str: frame.str.slice(1) + }) + } + + // transposition + // can only do a transposition if there are edits remaining + // and there are enough characters to transpose + if (frame.str.length > 1) { + var charA = frame.str.charAt(0), + charB = frame.str.charAt(1), + transposeNode + + if (charB in frame.node.edges) { + transposeNode = frame.node.edges[charB] + } else { + transposeNode = new lunr.TokenSet + frame.node.edges[charB] = transposeNode + } + + if (frame.str.length == 1) { + transposeNode.final = true + } + + stack.push({ + node: transposeNode, + editsRemaining: frame.editsRemaining - 1, + str: charA + frame.str.slice(2) + }) + } + } + + return root +} + +/** + * Creates a TokenSet from a string. + * + * The string may contain one or more wildcard characters (*) + * that will allow wildcard matching when intersecting with + * another TokenSet. + * + * @param {string} str - The string to create a TokenSet from. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.fromString = function (str) { + var node = new lunr.TokenSet, + root = node + + /* + * Iterates through all characters within the passed string + * appending a node for each character. + * + * When a wildcard character is found then a self + * referencing edge is introduced to continually match + * any number of any characters. + */ + for (var i = 0, len = str.length; i < len; i++) { + var char = str[i], + final = (i == len - 1) + + if (char == "*") { + node.edges[char] = node + node.final = final + + } else { + var next = new lunr.TokenSet + next.final = final + + node.edges[char] = next + node = next + } + } + + return root +} + +/** + * Converts this TokenSet into an array of strings + * contained within the TokenSet. + * + * This is not intended to be used on a TokenSet that + * contains wildcards, in these cases the results are + * undefined and are likely to cause an infinite loop. 
+ * + * @returns {string[]} + */ +lunr.TokenSet.prototype.toArray = function () { + var words = [] + + var stack = [{ + prefix: "", + node: this + }] + + while (stack.length) { + var frame = stack.pop(), + edges = Object.keys(frame.node.edges), + len = edges.length + + if (frame.node.final) { + /* In Safari, at this point the prefix is sometimes corrupted, see: + * https://github.com/olivernn/lunr.js/issues/279 Calling any + * String.prototype method forces Safari to "cast" this string to what + * it's supposed to be, fixing the bug. */ + frame.prefix.charAt(0) + words.push(frame.prefix) + } + + for (var i = 0; i < len; i++) { + var edge = edges[i] + + stack.push({ + prefix: frame.prefix.concat(edge), + node: frame.node.edges[edge] + }) + } + } + + return words +} + +/** + * Generates a string representation of a TokenSet. + * + * This is intended to allow TokenSets to be used as keys + * in objects, largely to aid the construction and minimisation + * of a TokenSet. As such it is not designed to be a human + * friendly representation of the TokenSet. + * + * @returns {string} + */ +lunr.TokenSet.prototype.toString = function () { + // NOTE: Using Object.keys here as this.edges is very likely + // to enter 'hash-mode' with many keys being added + // + // avoiding a for-in loop here as it leads to the function + // being de-optimised (at least in V8). From some simple + // benchmarks the performance is comparable, but allowing + // V8 to optimize may mean easy performance wins in the future. + + if (this._str) { + return this._str + } + + var str = this.final ? '1' : '0', + labels = Object.keys(this.edges).sort(), + len = labels.length + + for (var i = 0; i < len; i++) { + var label = labels[i], + node = this.edges[label] + + str = str + label + node.id + } + + return str +} + +/** + * Returns a new TokenSet that is the intersection of + * this TokenSet and the passed TokenSet. 
+ * + * This intersection will take into account any wildcards + * contained within the TokenSet. + * + * @param {lunr.TokenSet} b - An other TokenSet to intersect with. + * @returns {lunr.TokenSet} + */ +lunr.TokenSet.prototype.intersect = function (b) { + var output = new lunr.TokenSet, + frame = undefined + + var stack = [{ + qNode: b, + output: output, + node: this + }] + + while (stack.length) { + frame = stack.pop() + + // NOTE: As with the #toString method, we are using + // Object.keys and a for loop instead of a for-in loop + // as both of these objects enter 'hash' mode, causing + // the function to be de-optimised in V8 + var qEdges = Object.keys(frame.qNode.edges), + qLen = qEdges.length, + nEdges = Object.keys(frame.node.edges), + nLen = nEdges.length + + for (var q = 0; q < qLen; q++) { + var qEdge = qEdges[q] + + for (var n = 0; n < nLen; n++) { + var nEdge = nEdges[n] + + if (nEdge == qEdge || qEdge == '*') { + var node = frame.node.edges[nEdge], + qNode = frame.qNode.edges[qEdge], + final = node.final && qNode.final, + next = undefined + + if (nEdge in frame.output.edges) { + // an edge already exists for this character + // no need to create a new node, just set the finality + // bit unless this node is already final + next = frame.output.edges[nEdge] + next.final = next.final || final + + } else { + // no edge exists yet, must create one + // set the finality bit and insert it + // into the output + next = new lunr.TokenSet + next.final = final + frame.output.edges[nEdge] = next + } + + stack.push({ + qNode: qNode, + output: next, + node: node + }) + } + } + } + } + + return output +} +lunr.TokenSet.Builder = function () { + this.previousWord = "" + this.root = new lunr.TokenSet + this.uncheckedNodes = [] + this.minimizedNodes = {} +} + +lunr.TokenSet.Builder.prototype.insert = function (word) { + var node, + commonPrefix = 0 + + if (word < this.previousWord) { + throw new Error ("Out of order word insertion") + } + + for (var i = 0; i < 
word.length && i < this.previousWord.length; i++) { + if (word[i] != this.previousWord[i]) break + commonPrefix++ + } + + this.minimize(commonPrefix) + + if (this.uncheckedNodes.length == 0) { + node = this.root + } else { + node = this.uncheckedNodes[this.uncheckedNodes.length - 1].child + } + + for (var i = commonPrefix; i < word.length; i++) { + var nextNode = new lunr.TokenSet, + char = word[i] + + node.edges[char] = nextNode + + this.uncheckedNodes.push({ + parent: node, + char: char, + child: nextNode + }) + + node = nextNode + } + + node.final = true + this.previousWord = word +} + +lunr.TokenSet.Builder.prototype.finish = function () { + this.minimize(0) +} + +lunr.TokenSet.Builder.prototype.minimize = function (downTo) { + for (var i = this.uncheckedNodes.length - 1; i >= downTo; i--) { + var node = this.uncheckedNodes[i], + childKey = node.child.toString() + + if (childKey in this.minimizedNodes) { + node.parent.edges[node.char] = this.minimizedNodes[childKey] + } else { + // Cache the key for this node since + // we know it can't change anymore + node.child._str = childKey + + this.minimizedNodes[childKey] = node.child + } + + this.uncheckedNodes.pop() + } +} +/*! + * lunr.Index + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * An index contains the built index of all documents and provides a query interface + * to the index. + * + * Usually instances of lunr.Index will not be created using this constructor, instead + * lunr.Builder should be used to construct new indexes, or lunr.Index.load should be + * used to load previously built and serialized indexes. + * + * @constructor + * @param {Object} attrs - The attributes of the built search index. + * @param {Object} attrs.invertedIndex - An index of term/field to document reference. + * @param {Object} attrs.fieldVectors - Field vectors + * @param {lunr.TokenSet} attrs.tokenSet - An set of all corpus tokens. + * @param {string[]} attrs.fields - The names of indexed document fields. 
+ * @param {lunr.Pipeline} attrs.pipeline - The pipeline to use for search terms. + */ +lunr.Index = function (attrs) { + this.invertedIndex = attrs.invertedIndex + this.fieldVectors = attrs.fieldVectors + this.tokenSet = attrs.tokenSet + this.fields = attrs.fields + this.pipeline = attrs.pipeline +} + +/** + * A result contains details of a document matching a search query. + * @typedef {Object} lunr.Index~Result + * @property {string} ref - The reference of the document this result represents. + * @property {number} score - A number between 0 and 1 representing how similar this document is to the query. + * @property {lunr.MatchData} matchData - Contains metadata about this match including which term(s) caused the match. + */ + +/** + * Although lunr provides the ability to create queries using lunr.Query, it also provides a simple + * query language which itself is parsed into an instance of lunr.Query. + * + * For programmatically building queries it is advised to directly use lunr.Query, the query language + * is best used for human entered text rather than program generated text. + * + * At its simplest queries can just be a single term, e.g. `hello`, multiple terms are also supported + * and will be combined with OR, e.g `hello world` will match documents that contain either 'hello' + * or 'world', though those that contain both will rank higher in the results. + * + * Wildcards can be included in terms to match one or more unspecified characters, these wildcards can + * be inserted anywhere within the term, and more than one wildcard can exist in a single term. Adding + * wildcards will increase the number of documents that will be found but can also have a negative + * impact on query performance, especially with wildcards at the beginning of a term. + * + * Terms can be restricted to specific fields, e.g. `title:hello`, only documents with the term + * hello in the title field will match this query. 
Using a field not present in the index will lead + * to an error being thrown. + * + * Modifiers can also be added to terms, lunr supports edit distance and boost modifiers on terms. A term + * boost will make documents matching that term score higher, e.g. `foo^5`. Edit distance is also supported + * to provide fuzzy matching, e.g. 'hello~2' will match documents with hello with an edit distance of 2. + * Avoid large values for edit distance to improve query performance. + * + * Each term also supports a presence modifier. By default a term's presence in document is optional, however + * this can be changed to either required or prohibited. For a term's presence to be required in a document the + * term should be prefixed with a '+', e.g. `+foo bar` is a search for documents that must contain 'foo' and + * optionally contain 'bar'. Conversely a leading '-' sets the terms presence to prohibited, i.e. it must not + * appear in a document, e.g. `-foo bar` is a search for documents that do not contain 'foo' but may contain 'bar'. + * + * To escape special characters the backslash character '\' can be used, this allows searches to include + * characters that would normally be considered modifiers, e.g. `foo\~2` will search for a term "foo~2" instead + * of attempting to apply a boost of 2 to the search term "foo". + * + * @typedef {string} lunr.Index~QueryString + * @example Simple single term query + * hello + * @example Multiple term query + * hello world + * @example term scoped to a field + * title:hello + * @example term with a boost of 10 + * hello^10 + * @example term with an edit distance of 2 + * hello~2 + * @example terms with presence modifiers + * -foo +bar baz + */ + +/** + * Performs a search against the index using lunr query syntax. + * + * Results will be returned sorted by their score, the most relevant results + * will be returned first. 
For details on how the score is calculated, please see + * the {@link https://lunrjs.com/guides/searching.html#scoring|guide}. + * + * For more programmatic querying use lunr.Index#query. + * + * @param {lunr.Index~QueryString} queryString - A string containing a lunr query. + * @throws {lunr.QueryParseError} If the passed query string cannot be parsed. + * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.search = function (queryString) { + return this.query(function (query) { + var parser = new lunr.QueryParser(queryString, query) + parser.parse() + }) +} + +/** + * A query builder callback provides a query object to be used to express + * the query to perform on the index. + * + * @callback lunr.Index~queryBuilder + * @param {lunr.Query} query - The query object to build up. + * @this lunr.Query + */ + +/** + * Performs a query against the index using the yielded lunr.Query object. + * + * If performing programmatic queries against the index, this method is preferred + * over lunr.Index#search so as to avoid the additional query parsing overhead. + * + * A query object is yielded to the supplied function which should be used to + * express the query to be run against the index. + * + * Note that although this function takes a callback parameter it is _not_ an + * asynchronous operation, the callback is just yielded a query object to be + * customized. + * + * @param {lunr.Index~queryBuilder} fn - A function that is used to build the query. 
+ * @returns {lunr.Index~Result[]} + */ +lunr.Index.prototype.query = function (fn) { + // for each query clause + // * process terms + // * expand terms from token set + // * find matching documents and metadata + // * get document vectors + // * score documents + + var query = new lunr.Query(this.fields), + matchingFields = Object.create(null), + queryVectors = Object.create(null), + termFieldCache = Object.create(null), + requiredMatches = Object.create(null), + prohibitedMatches = Object.create(null) + + /* + * To support field level boosts a query vector is created per + * field. An empty vector is eagerly created to support negated + * queries. + */ + for (var i = 0; i < this.fields.length; i++) { + queryVectors[this.fields[i]] = new lunr.Vector + } + + fn.call(query, query) + + for (var i = 0; i < query.clauses.length; i++) { + /* + * Unless the pipeline has been disabled for this term, which is + * the case for terms with wildcards, we need to pass the clause + * term through the search pipeline. A pipeline returns an array + * of processed terms. Pipeline functions may expand the passed + * term, which means we may end up performing multiple index lookups + * for a single query term. + */ + var clause = query.clauses[i], + terms = null, + clauseMatches = lunr.Set.empty + + if (clause.usePipeline) { + terms = this.pipeline.runString(clause.term, { + fields: clause.fields + }) + } else { + terms = [clause.term] + } + + for (var m = 0; m < terms.length; m++) { + var term = terms[m] + + /* + * Each term returned from the pipeline needs to use the same query + * clause object, e.g. the same boost and or edit distance. The + * simplest way to do this is to re-use the clause object but mutate + * its term property. 
+ */ + clause.term = term + + /* + * From the term in the clause we create a token set which will then + * be used to intersect the indexes token set to get a list of terms + * to lookup in the inverted index + */ + var termTokenSet = lunr.TokenSet.fromClause(clause), + expandedTerms = this.tokenSet.intersect(termTokenSet).toArray() + + /* + * If a term marked as required does not exist in the tokenSet it is + * impossible for the search to return any matches. We set all the field + * scoped required matches set to empty and stop examining any further + * clauses. + */ + if (expandedTerms.length === 0 && clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = lunr.Set.empty + } + + break + } + + for (var j = 0; j < expandedTerms.length; j++) { + /* + * For each term get the posting and termIndex, this is required for + * building the query vector. + */ + var expandedTerm = expandedTerms[j], + posting = this.invertedIndex[expandedTerm], + termIndex = posting._index + + for (var k = 0; k < clause.fields.length; k++) { + /* + * For each field that this query term is scoped by (by default + * all fields are in scope) we need to get all the document refs + * that have this term in that field. + * + * The posting is the entry in the invertedIndex for the matching + * term from above. + */ + var field = clause.fields[k], + fieldPosting = posting[field], + matchingDocumentRefs = Object.keys(fieldPosting), + termField = expandedTerm + "/" + field, + matchingDocumentsSet = new lunr.Set(matchingDocumentRefs) + + /* + * if the presence of this term is required ensure that the matching + * documents are added to the set of required matches for this clause. 
+ * + */ + if (clause.presence == lunr.Query.presence.REQUIRED) { + clauseMatches = clauseMatches.union(matchingDocumentsSet) + + if (requiredMatches[field] === undefined) { + requiredMatches[field] = lunr.Set.complete + } + } + + /* + * if the presence of this term is prohibited ensure that the matching + * documents are added to the set of prohibited matches for this field, + * creating that set if it does not yet exist. + */ + if (clause.presence == lunr.Query.presence.PROHIBITED) { + if (prohibitedMatches[field] === undefined) { + prohibitedMatches[field] = lunr.Set.empty + } + + prohibitedMatches[field] = prohibitedMatches[field].union(matchingDocumentsSet) + + /* + * Prohibited matches should not be part of the query vector used for + * similarity scoring and no metadata should be extracted so we continue + * to the next field + */ + continue + } + + /* + * The query field vector is populated using the termIndex found for + * the term and a unit value with the appropriate boost applied. + * Using upsert because there could already be an entry in the vector + * for the term we are working with. In that case we just add the scores + * together. 
+ */ + queryVectors[field].upsert(termIndex, clause.boost, function (a, b) { return a + b }) + + /** + * If we've already seen this term, field combo then we've already collected + * the matching documents and metadata, no need to go through all that again + */ + if (termFieldCache[termField]) { + continue + } + + for (var l = 0; l < matchingDocumentRefs.length; l++) { + /* + * All metadata for this term/field/document triple + * are then extracted and collected into an instance + * of lunr.MatchData ready to be returned in the query + * results + */ + var matchingDocumentRef = matchingDocumentRefs[l], + matchingFieldRef = new lunr.FieldRef (matchingDocumentRef, field), + metadata = fieldPosting[matchingDocumentRef], + fieldMatch + + if ((fieldMatch = matchingFields[matchingFieldRef]) === undefined) { + matchingFields[matchingFieldRef] = new lunr.MatchData (expandedTerm, field, metadata) + } else { + fieldMatch.add(expandedTerm, field, metadata) + } + + } + + termFieldCache[termField] = true + } + } + } + + /** + * If the presence was required we need to update the requiredMatches field sets. + * We do this after all fields for the term have collected their matches because + * the clause terms presence is required in _any_ of the fields not _all_ of the + * fields. 
+ */ + if (clause.presence === lunr.Query.presence.REQUIRED) { + for (var k = 0; k < clause.fields.length; k++) { + var field = clause.fields[k] + requiredMatches[field] = requiredMatches[field].intersect(clauseMatches) + } + } + } + + /** + * Need to combine the field scoped required and prohibited + * matching documents into a global set of required and prohibited + * matches + */ + var allRequiredMatches = lunr.Set.complete, + allProhibitedMatches = lunr.Set.empty + + for (var i = 0; i < this.fields.length; i++) { + var field = this.fields[i] + + if (requiredMatches[field]) { + allRequiredMatches = allRequiredMatches.intersect(requiredMatches[field]) + } + + if (prohibitedMatches[field]) { + allProhibitedMatches = allProhibitedMatches.union(prohibitedMatches[field]) + } + } + + var matchingFieldRefs = Object.keys(matchingFields), + results = [], + matches = Object.create(null) + + /* + * If the query is negated (contains only prohibited terms) + * we need to get _all_ fieldRefs currently existing in the + * index. This is only done when we know that the query is + * entirely prohibited terms to avoid any cost of getting all + * fieldRefs unnecessarily. + * + * Additionally, blank MatchData must be created to correctly + * populate the results. + */ + if (query.isNegated()) { + matchingFieldRefs = Object.keys(this.fieldVectors) + + for (var i = 0; i < matchingFieldRefs.length; i++) { + var matchingFieldRef = matchingFieldRefs[i] + var fieldRef = lunr.FieldRef.fromString(matchingFieldRef) + matchingFields[matchingFieldRef] = new lunr.MatchData + } + } + + for (var i = 0; i < matchingFieldRefs.length; i++) { + /* + * Currently we have document fields that match the query, but we + * need to return documents. The matchData and scores are combined + * from multiple fields belonging to the same document. + * + * Scores are calculated by field, using the query vectors created + * above, and combined into a final document score using addition. 
+ */ + var fieldRef = lunr.FieldRef.fromString(matchingFieldRefs[i]), + docRef = fieldRef.docRef + + if (!allRequiredMatches.contains(docRef)) { + continue + } + + if (allProhibitedMatches.contains(docRef)) { + continue + } + + var fieldVector = this.fieldVectors[fieldRef], + score = queryVectors[fieldRef.fieldName].similarity(fieldVector), + docMatch + + if ((docMatch = matches[docRef]) !== undefined) { + docMatch.score += score + docMatch.matchData.combine(matchingFields[fieldRef]) + } else { + var match = { + ref: docRef, + score: score, + matchData: matchingFields[fieldRef] + } + matches[docRef] = match + results.push(match) + } + } + + /* + * Sort the results objects by score, highest first. + */ + return results.sort(function (a, b) { + return b.score - a.score + }) +} + +/** + * Prepares the index for JSON serialization. + * + * The schema for this JSON blob will be described in a + * separate JSON schema file. + * + * @returns {Object} + */ +lunr.Index.prototype.toJSON = function () { + var invertedIndex = Object.keys(this.invertedIndex) + .sort() + .map(function (term) { + return [term, this.invertedIndex[term]] + }, this) + + var fieldVectors = Object.keys(this.fieldVectors) + .map(function (ref) { + return [ref, this.fieldVectors[ref].toJSON()] + }, this) + + return { + version: lunr.version, + fields: this.fields, + fieldVectors: fieldVectors, + invertedIndex: invertedIndex, + pipeline: this.pipeline.toJSON() + } +} + +/** + * Loads a previously serialized lunr.Index + * + * @param {Object} serializedIndex - A previously serialized lunr.Index + * @returns {lunr.Index} + */ +lunr.Index.load = function (serializedIndex) { + var attrs = {}, + fieldVectors = {}, + serializedVectors = serializedIndex.fieldVectors, + invertedIndex = Object.create(null), + serializedInvertedIndex = serializedIndex.invertedIndex, + tokenSetBuilder = new lunr.TokenSet.Builder, + pipeline = lunr.Pipeline.load(serializedIndex.pipeline) + + if (serializedIndex.version != 
lunr.version) { + lunr.utils.warn("Version mismatch when loading serialised index. Current version of lunr '" + lunr.version + "' does not match serialized index '" + serializedIndex.version + "'") + } + + for (var i = 0; i < serializedVectors.length; i++) { + var tuple = serializedVectors[i], + ref = tuple[0], + elements = tuple[1] + + fieldVectors[ref] = new lunr.Vector(elements) + } + + for (var i = 0; i < serializedInvertedIndex.length; i++) { + var tuple = serializedInvertedIndex[i], + term = tuple[0], + posting = tuple[1] + + tokenSetBuilder.insert(term) + invertedIndex[term] = posting + } + + tokenSetBuilder.finish() + + attrs.fields = serializedIndex.fields + + attrs.fieldVectors = fieldVectors + attrs.invertedIndex = invertedIndex + attrs.tokenSet = tokenSetBuilder.root + attrs.pipeline = pipeline + + return new lunr.Index(attrs) +} +/*! + * lunr.Builder + * Copyright (C) 2020 Oliver Nightingale + */ + +/** + * lunr.Builder performs indexing on a set of documents and + * returns instances of lunr.Index ready for querying. + * + * All configuration of the index is done via the builder, the + * fields to index, the document reference, the text processing + * pipeline and document scoring parameters are all set on the + * builder before indexing. + * + * @constructor + * @property {string} _ref - Internal reference to the document reference field. + * @property {string[]} _fields - Internal reference to the document fields to index. + * @property {object} invertedIndex - The inverted index maps terms to document fields. + * @property {object} documentTermFrequencies - Keeps track of document term frequencies. + * @property {object} documentLengths - Keeps track of the length of documents added to the index. + * @property {lunr.tokenizer} tokenizer - Function for splitting strings into tokens for indexing. + * @property {lunr.Pipeline} pipeline - The pipeline performs text processing on tokens before indexing. 
+ * @property {lunr.Pipeline} searchPipeline - A pipeline for processing search terms before querying the index. + * @property {number} documentCount - Keeps track of the total number of documents indexed. + * @property {number} _b - A parameter to control field length normalization, setting this to 0 disabled normalization, 1 fully normalizes field lengths, the default value is 0.75. + * @property {number} _k1 - A parameter to control how quickly an increase in term frequency results in term frequency saturation, the default value is 1.2. + * @property {number} termIndex - A counter incremented for each unique term, used to identify a terms position in the vector space. + * @property {array} metadataWhitelist - A list of metadata keys that have been whitelisted for entry in the index. + */ +lunr.Builder = function () { + this._ref = "id" + this._fields = Object.create(null) + this._documents = Object.create(null) + this.invertedIndex = Object.create(null) + this.fieldTermFrequencies = {} + this.fieldLengths = {} + this.tokenizer = lunr.tokenizer + this.pipeline = new lunr.Pipeline + this.searchPipeline = new lunr.Pipeline + this.documentCount = 0 + this._b = 0.75 + this._k1 = 1.2 + this.termIndex = 0 + this.metadataWhitelist = [] +} + +/** + * Sets the document field used as the document reference. Every document must have this field. + * The type of this field in the document should be a string, if it is not a string it will be + * coerced into a string by calling toString. + * + * The default ref is 'id'. + * + * The ref should _not_ be changed during indexing, it should be set before any documents are + * added to the index. Changing it during indexing can lead to inconsistent results. + * + * @param {string} ref - The name of the reference field in the document. + */ +lunr.Builder.prototype.ref = function (ref) { + this._ref = ref +} + +/** + * A function that is used to extract a field from a document. 
+ * + * Lunr expects a field to be at the top level of a document, if however the field + * is deeply nested within a document an extractor function can be used to extract + * the right field for indexing. + * + * @callback fieldExtractor + * @param {object} doc - The document being added to the index. + * @returns {?(string|object|object[])} obj - The object that will be indexed for this field. + * @example Extracting a nested field + * function (doc) { return doc.nested.field } + */ + +/** + * Adds a field to the list of document fields that will be indexed. Every document being + * indexed should have this field. Null values for this field in indexed documents will + * not cause errors but will limit the chance of that document being retrieved by searches. + * + * All fields should be added before adding documents to the index. Adding fields after + * a document has been indexed will have no effect on already indexed documents. + * + * Fields can be boosted at build time. This allows terms within that field to have more + * importance when ranking search results. Use a field boost to specify that matches within + * one field are more important than other fields. + * + * @param {string} fieldName - The name of a field to index in all documents. + * @param {object} attributes - Optional attributes associated with this field. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this field. + * @param {fieldExtractor} [attributes.extractor] - Function to extract a field from a document. + * @throws {RangeError} fieldName cannot contain unsupported characters '/' + */ +lunr.Builder.prototype.field = function (fieldName, attributes) { + if (/\//.test(fieldName)) { + throw new RangeError ("Field '" + fieldName + "' contains illegal character '/'") + } + + this._fields[fieldName] = attributes || {} +} + +/** + * A parameter to tune the amount of field length normalisation that is applied when + * calculating relevance scores. 
A value of 0 will completely disable any normalisation + * and a value of 1 will fully normalise field lengths. The default is 0.75. Values of b + * will be clamped to the range 0 - 1. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.b = function (number) { + if (number < 0) { + this._b = 0 + } else if (number > 1) { + this._b = 1 + } else { + this._b = number + } +} + +/** + * A parameter that controls the speed at which a rise in term frequency results in term + * frequency saturation. The default value is 1.2. Setting this to a higher value will give + * slower saturation levels, a lower value will result in quicker saturation. + * + * @param {number} number - The value to set for this tuning parameter. + */ +lunr.Builder.prototype.k1 = function (number) { + this._k1 = number +} + +/** + * Adds a document to the index. + * + * Before adding fields to the index the index should have been fully setup, with the document + * ref and all fields to index already having been specified. + * + * The document must have a field name as specified by the ref (by default this is 'id') and + * it should have all fields defined for indexing, though null or undefined values will not + * cause errors. + * + * Entire documents can be boosted at build time. Applying a boost to a document indicates that + * this document should rank higher in search results than other documents. + * + * @param {object} doc - The document to add to the index. + * @param {object} attributes - Optional attributes associated with this document. + * @param {number} [attributes.boost=1] - Boost applied to all terms within this document. 
+ */ +lunr.Builder.prototype.add = function (doc, attributes) { + var docRef = doc[this._ref], + fields = Object.keys(this._fields) + + this._documents[docRef] = attributes || {} + this.documentCount += 1 + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i], + extractor = this._fields[fieldName].extractor, + field = extractor ? extractor(doc) : doc[fieldName], + tokens = this.tokenizer(field, { + fields: [fieldName] + }), + terms = this.pipeline.run(tokens), + fieldRef = new lunr.FieldRef (docRef, fieldName), + fieldTerms = Object.create(null) + + this.fieldTermFrequencies[fieldRef] = fieldTerms + this.fieldLengths[fieldRef] = 0 + + // store the length of this field for this document + this.fieldLengths[fieldRef] += terms.length + + // calculate term frequencies for this field + for (var j = 0; j < terms.length; j++) { + var term = terms[j] + + if (fieldTerms[term] == undefined) { + fieldTerms[term] = 0 + } + + fieldTerms[term] += 1 + + // add to inverted index + // create an initial posting if one doesn't exist + if (this.invertedIndex[term] == undefined) { + var posting = Object.create(null) + posting["_index"] = this.termIndex + this.termIndex += 1 + + for (var k = 0; k < fields.length; k++) { + posting[fields[k]] = Object.create(null) + } + + this.invertedIndex[term] = posting + } + + // add an entry for this term/fieldName/docRef to the invertedIndex + if (this.invertedIndex[term][fieldName][docRef] == undefined) { + this.invertedIndex[term][fieldName][docRef] = Object.create(null) + } + + // store all whitelisted metadata about this token in the + // inverted index + for (var l = 0; l < this.metadataWhitelist.length; l++) { + var metadataKey = this.metadataWhitelist[l], + metadata = term.metadata[metadataKey] + + if (this.invertedIndex[term][fieldName][docRef][metadataKey] == undefined) { + this.invertedIndex[term][fieldName][docRef][metadataKey] = [] + } + + this.invertedIndex[term][fieldName][docRef][metadataKey].push(metadata) + } + } 
+ + } +} + +/** + * Calculates the average document length for this index + * + * @private + */ +lunr.Builder.prototype.calculateAverageFieldLengths = function () { + + var fieldRefs = Object.keys(this.fieldLengths), + numberOfFields = fieldRefs.length, + accumulator = {}, + documentsWithField = {} + + for (var i = 0; i < numberOfFields; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + field = fieldRef.fieldName + + documentsWithField[field] || (documentsWithField[field] = 0) + documentsWithField[field] += 1 + + accumulator[field] || (accumulator[field] = 0) + accumulator[field] += this.fieldLengths[fieldRef] + } + + var fields = Object.keys(this._fields) + + for (var i = 0; i < fields.length; i++) { + var fieldName = fields[i] + accumulator[fieldName] = accumulator[fieldName] / documentsWithField[fieldName] + } + + this.averageFieldLength = accumulator +} + +/** + * Builds a vector space model of every document using lunr.Vector + * + * @private + */ +lunr.Builder.prototype.createFieldVectors = function () { + var fieldVectors = {}, + fieldRefs = Object.keys(this.fieldTermFrequencies), + fieldRefsLength = fieldRefs.length, + termIdfCache = Object.create(null) + + for (var i = 0; i < fieldRefsLength; i++) { + var fieldRef = lunr.FieldRef.fromString(fieldRefs[i]), + fieldName = fieldRef.fieldName, + fieldLength = this.fieldLengths[fieldRef], + fieldVector = new lunr.Vector, + termFrequencies = this.fieldTermFrequencies[fieldRef], + terms = Object.keys(termFrequencies), + termsLength = terms.length + + + var fieldBoost = this._fields[fieldName].boost || 1, + docBoost = this._documents[fieldRef.docRef].boost || 1 + + for (var j = 0; j < termsLength; j++) { + var term = terms[j], + tf = termFrequencies[term], + termIndex = this.invertedIndex[term]._index, + idf, score, scoreWithPrecision + + if (termIdfCache[term] === undefined) { + idf = lunr.idf(this.invertedIndex[term], this.documentCount) + termIdfCache[term] = idf + } else { + idf = 
termIdfCache[term] + } + + score = idf * ((this._k1 + 1) * tf) / (this._k1 * (1 - this._b + this._b * (fieldLength / this.averageFieldLength[fieldName])) + tf) + score *= fieldBoost + score *= docBoost + scoreWithPrecision = Math.round(score * 1000) / 1000 + // Converts 1.23456789 to 1.234. + // Reducing the precision so that the vectors take up less + // space when serialised. Doing it now so that they behave + // the same before and after serialisation. Also, this is + // the fastest approach to reducing a number's precision in + // JavaScript. + + fieldVector.insert(termIndex, scoreWithPrecision) + } + + fieldVectors[fieldRef] = fieldVector + } + + this.fieldVectors = fieldVectors +} + +/** + * Creates a token set of all tokens in the index using lunr.TokenSet + * + * @private + */ +lunr.Builder.prototype.createTokenSet = function () { + this.tokenSet = lunr.TokenSet.fromArray( + Object.keys(this.invertedIndex).sort() + ) +} + +/** + * Builds the index, creating an instance of lunr.Index. + * + * This completes the indexing process and should only be called + * once all documents have been added to the index. + * + * @returns {lunr.Index} + */ +lunr.Builder.prototype.build = function () { + this.calculateAverageFieldLengths() + this.createFieldVectors() + this.createTokenSet() + + return new lunr.Index({ + invertedIndex: this.invertedIndex, + fieldVectors: this.fieldVectors, + tokenSet: this.tokenSet, + fields: Object.keys(this._fields), + pipeline: this.searchPipeline + }) +} + +/** + * Applies a plugin to the index builder. + * + * A plugin is a function that is called with the index builder as its context. + * Plugins can be used to customise or extend the behaviour of the index + * in some way. A plugin is just a function, that encapsulated the custom + * behaviour that should be applied when building the index. + * + * The plugin function will be called with the index builder as its argument, additional + * arguments can also be passed when calling use. 
The function will be called + * with the index builder as its context. + * + * @param {Function} plugin The plugin to apply. + */ +lunr.Builder.prototype.use = function (fn) { + var args = Array.prototype.slice.call(arguments, 1) + args.unshift(this) + fn.apply(this, args) +} +/** + * Contains and collects metadata about a matching document. + * A single instance of lunr.MatchData is returned as part of every + * lunr.Index~Result. + * + * @constructor + * @param {string} term - The term this match data is associated with + * @param {string} field - The field in which the term was found + * @param {object} metadata - The metadata recorded about this term in this field + * @property {object} metadata - A cloned collection of metadata associated with this document. + * @see {@link lunr.Index~Result} + */ +lunr.MatchData = function (term, field, metadata) { + var clonedMetadata = Object.create(null), + metadataKeys = Object.keys(metadata || {}) + + // Cloning the metadata to prevent the original + // being mutated during match data combination. + // Metadata is kept in an array within the inverted + // index so cloning the data can be done with + // Array#slice + for (var i = 0; i < metadataKeys.length; i++) { + var key = metadataKeys[i] + clonedMetadata[key] = metadata[key].slice() + } + + this.metadata = Object.create(null) + + if (term !== undefined) { + this.metadata[term] = Object.create(null) + this.metadata[term][field] = clonedMetadata + } +} + +/** + * An instance of lunr.MatchData will be created for every term that matches a + * document. However only one instance is required in a lunr.Index~Result. This + * method combines metadata from another instance of lunr.MatchData with this + * objects metadata. + * + * @param {lunr.MatchData} otherMatchData - Another instance of match data to merge with this one. 
 * @see {@link lunr.Index~Result}
 */
lunr.MatchData.prototype.combine = function (otherMatchData) {
  // Merge the nested term -> field -> metadata-key structure; when both
  // sides hold data for the same key, the value arrays are concatenated.
  var terms = Object.keys(otherMatchData.metadata)

  for (var i = 0; i < terms.length; i++) {
    var term = terms[i],
        fields = Object.keys(otherMatchData.metadata[term])

    if (this.metadata[term] == undefined) {
      this.metadata[term] = Object.create(null)
    }

    for (var j = 0; j < fields.length; j++) {
      var field = fields[j],
          keys = Object.keys(otherMatchData.metadata[term][field])

      if (this.metadata[term][field] == undefined) {
        this.metadata[term][field] = Object.create(null)
      }

      for (var k = 0; k < keys.length; k++) {
        var key = keys[k]

        if (this.metadata[term][field][key] == undefined) {
          // First time this key is seen: adopt the other side's array.
          this.metadata[term][field][key] = otherMatchData.metadata[term][field][key]
        } else {
          this.metadata[term][field][key] = this.metadata[term][field][key].concat(otherMatchData.metadata[term][field][key])
        }

      }
    }
  }
}

/**
 * Add metadata for a term/field pair to this instance of match data. 
 *
 * @param {string} term - The term this match data is associated with
 * @param {string} field - The field in which the term was found
 * @param {object} metadata - The metadata recorded about this term in this field
 */
lunr.MatchData.prototype.add = function (term, field, metadata) {
  // Fast paths: the first metadata seen for a term, or for a field within
  // a term, is stored directly without any merging.
  if (!(term in this.metadata)) {
    this.metadata[term] = Object.create(null)
    this.metadata[term][field] = metadata
    return
  }

  if (!(field in this.metadata[term])) {
    this.metadata[term][field] = metadata
    return
  }

  var metadataKeys = Object.keys(metadata)

  for (var i = 0; i < metadataKeys.length; i++) {
    var key = metadataKeys[i]

    if (key in this.metadata[term][field]) {
      // Existing key: concatenate the new values onto the stored array.
      this.metadata[term][field][key] = this.metadata[term][field][key].concat(metadata[key])
    } else {
      this.metadata[term][field][key] = metadata[key]
    }
  }
}
/**
 * A lunr.Query provides a programmatic way of defining queries to be performed
 * against a {@link lunr.Index}.
 *
 * Prefer constructing a lunr.Query using the {@link lunr.Index#query} method
 * so the query object is pre-initialized with the right index fields.
 *
 * @constructor
 * @property {lunr.Query~Clause[]} clauses - An array of query clauses.
 * @property {string[]} allFields - An array of all available fields in a lunr.Index.
 */
lunr.Query = function (allFields) {
  this.clauses = []
  this.allFields = allFields
}

/**
 * Constants for indicating what kind of automatic wildcard insertion will be used when constructing a query clause.
 *
 * This allows wildcards to be added to the beginning and end of a term without having to manually do any string
 * concatenation.
 *
 * The wildcard constants can be bitwise combined to select both leading and trailing wildcards. 
 *
 * @constant
 * @default
 * @property {number} wildcard.NONE - The term will have no wildcards inserted, this is the default behaviour
 * @property {number} wildcard.LEADING - Prepend the term with a wildcard, unless a leading wildcard already exists
 * @property {number} wildcard.TRAILING - Append a wildcard to the term, unless a trailing wildcard already exists
 * @see lunr.Query~Clause
 * @see lunr.Query#clause
 * @see lunr.Query#term
 * @example query term with trailing wildcard
 * query.term('foo', { wildcard: lunr.Query.wildcard.TRAILING })
 * @example query term with leading and trailing wildcard
 * query.term('foo', {
 *   wildcard: lunr.Query.wildcard.LEADING | lunr.Query.wildcard.TRAILING
 * })
 */

// A String *object* (not a primitive) is used deliberately: the bit-flag
// properties below are attached to it, while it still coerces to "*" in
// string comparisons (see lunr.Query#clause).
lunr.Query.wildcard = new String ("*")
lunr.Query.wildcard.NONE = 0
lunr.Query.wildcard.LEADING = 1
lunr.Query.wildcard.TRAILING = 2

/**
 * Constants for indicating what kind of presence a term must have in matching documents.
 *
 * @constant
 * @enum {number}
 * @see lunr.Query~Clause
 * @see lunr.Query#clause
 * @see lunr.Query#term
 * @example query term with required presence
 * query.term('foo', { presence: lunr.Query.presence.REQUIRED })
 */
lunr.Query.presence = {
  /**
   * Term's presence in a document is optional, this is the default value.
   */
  OPTIONAL: 1,

  /**
   * Term's presence in a document is required, documents that do not contain
   * this term will not be returned.
   */
  REQUIRED: 2,

  /**
   * Term's presence in a document is prohibited, documents that do contain
   * this term will not be returned.
   */
  PROHIBITED: 3
}

/**
 * A single clause in a {@link lunr.Query} contains a term and details on how to
 * match that term against a {@link lunr.Index}.
 *
 * @typedef {Object} lunr.Query~Clause
 * @property {string[]} fields - The fields in an index this clause should be matched against.
 * @property {number} [boost=1] - Any boost that should be applied when matching this clause. 
 * @property {number} [editDistance] - Whether the term should have fuzzy matching applied, and how fuzzy the match should be.
 * @property {boolean} [usePipeline] - Whether the term should be passed through the search pipeline.
 * @property {number} [wildcard=lunr.Query.wildcard.NONE] - Whether the term should have wildcards appended or prepended.
 * @property {number} [presence=lunr.Query.presence.OPTIONAL] - The terms presence in any matching documents.
 */

/**
 * Adds a {@link lunr.Query~Clause} to this query.
 *
 * Unless the clause contains the fields to be matched all fields will be matched. In addition
 * a default boost of 1 is applied to the clause.
 *
 * @param {lunr.Query~Clause} clause - The clause to add to this query.
 * @see lunr.Query~Clause
 * @returns {lunr.Query}
 */
lunr.Query.prototype.clause = function (clause) {
  // Fill in defaults for any options the caller omitted.
  if (!('fields' in clause)) {
    clause.fields = this.allFields
  }

  if (!('boost' in clause)) {
    clause.boost = 1
  }

  if (!('usePipeline' in clause)) {
    clause.usePipeline = true
  }

  if (!('wildcard' in clause)) {
    clause.wildcard = lunr.Query.wildcard.NONE
  }

  // Only prepend "*" when one is not already there; lunr.Query.wildcard
  // coerces to the string "*" in these comparisons.
  if ((clause.wildcard & lunr.Query.wildcard.LEADING) && (clause.term.charAt(0) != lunr.Query.wildcard)) {
    clause.term = "*" + clause.term
  }

  if ((clause.wildcard & lunr.Query.wildcard.TRAILING) && (clause.term.slice(-1) != lunr.Query.wildcard)) {
    clause.term = "" + clause.term + "*"
  }

  if (!('presence' in clause)) {
    clause.presence = lunr.Query.presence.OPTIONAL
  }

  this.clauses.push(clause)

  return this
}

/**
 * A negated query is one in which every clause has a presence of
 * prohibited. These queries require some special processing to return
 * the expected results. 
 *
 * @returns boolean
 */
lunr.Query.prototype.isNegated = function () {
  for (var i = 0; i < this.clauses.length; i++) {
    if (this.clauses[i].presence != lunr.Query.presence.PROHIBITED) {
      return false
    }
  }

  // Note: vacuously true for a query with no clauses.
  return true
}

/**
 * Adds a term to the current query, under the covers this will create a {@link lunr.Query~Clause}
 * to the list of clauses that make up this query.
 *
 * The term is used as is, i.e. no tokenization will be performed by this method. Instead conversion
 * to a token or token-like string should be done before calling this method.
 *
 * The term will be converted to a string by calling `toString`. Multiple terms can be passed as an
 * array, each term in the array will share the same options.
 *
 * @param {object|object[]} term - The term(s) to add to the query.
 * @param {object} [options] - Any additional properties to add to the query clause.
 * @returns {lunr.Query}
 * @see lunr.Query#clause
 * @see lunr.Query~Clause
 * @example adding a single term to a query
 * query.term("foo")
 * @example adding a single term to a query and specifying search fields, term boost and automatic trailing wildcard
 * query.term("foo", {
 *   fields: ["title"],
 *   boost: 10,
 *   wildcard: lunr.Query.wildcard.TRAILING
 * })
 * @example using lunr.tokenizer to convert a string to tokens before using them as terms
 * query.term(lunr.tokenizer("foo bar"))
 */
lunr.Query.prototype.term = function (term, options) {
  if (Array.isArray(term)) {
    // Clone the options per element: lunr.Query#clause mutates the object
    // it is given (filling in defaults, rewriting the term for wildcards),
    // so sharing one options object between clauses would leak state.
    term.forEach(function (t) { this.term(t, lunr.utils.clone(options)) }, this)
    return this
  }

  var clause = options || {}
  clause.term = term.toString()

  this.clause(clause)

  return this
}
// Error thrown when a query string cannot be parsed; start/end delimit the
// offending character range within the input string.
lunr.QueryParseError = function (message, start, end) {
  this.name = "QueryParseError"
  this.message = message
  this.start = start
  this.end = end
}

lunr.QueryParseError.prototype = new Error
// Hand-rolled lexer that splits a query string into typed lexemes
// (TERM, FIELD, EDIT_DISTANCE, BOOST, PRESENCE).
lunr.QueryLexer = function (str) {
  this.lexemes = []
  this.str = str
  this.length 
= str.length
  this.pos = 0
  this.start = 0
  this.escapeCharPositions = []
}

// Drives the state machine: each state function consumes input and returns
// the next state function, or undefined to stop.
lunr.QueryLexer.prototype.run = function () {
  var state = lunr.QueryLexer.lexText

  while (state) {
    state = state(this)
  }
}

// Returns str[start:pos] with any recorded escape characters ("\") removed.
lunr.QueryLexer.prototype.sliceString = function () {
  var subSlices = [],
      sliceStart = this.start,
      sliceEnd = this.pos

  for (var i = 0; i < this.escapeCharPositions.length; i++) {
    sliceEnd = this.escapeCharPositions[i]
    subSlices.push(this.str.slice(sliceStart, sliceEnd))
    sliceStart = sliceEnd + 1
  }

  subSlices.push(this.str.slice(sliceStart, this.pos))
  this.escapeCharPositions.length = 0

  return subSlices.join('')
}

// Records a lexeme spanning [start, pos) and advances start past it.
lunr.QueryLexer.prototype.emit = function (type) {
  this.lexemes.push({
    type: type,
    str: this.sliceString(),
    start: this.start,
    end: this.pos
  })

  this.start = this.pos
}

lunr.QueryLexer.prototype.escapeCharacter = function () {
  // pos has already advanced past the "\"; remember where it was and skip
  // over the escaped character so it is treated literally.
  this.escapeCharPositions.push(this.pos - 1)
  this.pos += 1
}

// Returns the next character, or the EOS sentinel at end of input.
lunr.QueryLexer.prototype.next = function () {
  if (this.pos >= this.length) {
    return lunr.QueryLexer.EOS
  }

  var char = this.str.charAt(this.pos)
  this.pos += 1
  return char
}

// Width of the lexeme currently being scanned.
lunr.QueryLexer.prototype.width = function () {
  return this.pos - this.start
}

lunr.QueryLexer.prototype.ignore = function () {
  if (this.start == this.pos) {
    this.pos += 1
  }

  this.start = this.pos
}

lunr.QueryLexer.prototype.backup = function () {
  this.pos -= 1
}

// Consumes a run of ASCII digits (char codes 48-57, i.e. '0'-'9').
lunr.QueryLexer.prototype.acceptDigitRun = function () {
  var char, charCode

  do {
    char = this.next()
    charCode = char.charCodeAt(0)
  } while (charCode > 47 && charCode < 58)

  if (char != lunr.QueryLexer.EOS) {
    this.backup()
  }
}

lunr.QueryLexer.prototype.more = function () {
  return this.pos < this.length
}

// Lexeme type tags.
lunr.QueryLexer.EOS = 'EOS'
lunr.QueryLexer.FIELD = 'FIELD'
lunr.QueryLexer.TERM = 'TERM'
lunr.QueryLexer.EDIT_DISTANCE = 'EDIT_DISTANCE'
lunr.QueryLexer.BOOST = 'BOOST'
lunr.QueryLexer.PRESENCE = 
'PRESENCE'

// State: emit the field name (the text scanned before ":") and resume
// normal text lexing.
lunr.QueryLexer.lexField = function (lexer) {
  lexer.backup()
  lexer.emit(lunr.QueryLexer.FIELD)
  lexer.ignore()
  return lunr.QueryLexer.lexText
}

// State: emit a term when a separator is reached. width > 1 means at least
// one character besides the separator itself was scanned.
lunr.QueryLexer.lexTerm = function (lexer) {
  if (lexer.width() > 1) {
    lexer.backup()
    lexer.emit(lunr.QueryLexer.TERM)
  }

  lexer.ignore()

  if (lexer.more()) {
    return lunr.QueryLexer.lexText
  }
}

// State: emit the digits following "~" as an EDIT_DISTANCE lexeme.
lunr.QueryLexer.lexEditDistance = function (lexer) {
  lexer.ignore()
  lexer.acceptDigitRun()
  lexer.emit(lunr.QueryLexer.EDIT_DISTANCE)
  return lunr.QueryLexer.lexText
}

// State: emit the digits following "^" as a BOOST lexeme.
lunr.QueryLexer.lexBoost = function (lexer) {
  lexer.ignore()
  lexer.acceptDigitRun()
  lexer.emit(lunr.QueryLexer.BOOST)
  return lunr.QueryLexer.lexText
}

// Terminal state: flush any trailing term at end of input.
lunr.QueryLexer.lexEOS = function (lexer) {
  if (lexer.width() > 0) {
    lexer.emit(lunr.QueryLexer.TERM)
  }
}

// This matches the separator used when tokenising fields
// within a document. These should match otherwise it is
// not possible to search for some tokens within a document.
//
// It is possible for the user to change the separator on the
// tokenizer so it _might_ clash with any other of the special
// characters already used within the search string, e.g. :.
//
// This means that it is possible to change the separator in
// such a way that makes some words unsearchable using a search
// string. 
lunr.QueryLexer.termSeparator = lunr.tokenizer.separator

// Default state: scans characters one at a time, dispatching to the
// specialised states on ":", "~", "^", leading "+"/"-", separators and EOS.
lunr.QueryLexer.lexText = function (lexer) {
  while (true) {
    var char = lexer.next()

    if (char == lunr.QueryLexer.EOS) {
      return lunr.QueryLexer.lexEOS
    }

    // Escape character is '\'
    if (char.charCodeAt(0) == 92) {
      lexer.escapeCharacter()
      continue
    }

    if (char == ":") {
      return lunr.QueryLexer.lexField
    }

    if (char == "~") {
      lexer.backup()
      if (lexer.width() > 0) {
        lexer.emit(lunr.QueryLexer.TERM)
      }
      return lunr.QueryLexer.lexEditDistance
    }

    if (char == "^") {
      lexer.backup()
      if (lexer.width() > 0) {
        lexer.emit(lunr.QueryLexer.TERM)
      }
      return lunr.QueryLexer.lexBoost
    }

    // "+" indicates term presence is required
    // checking for length to ensure that only
    // leading "+" are considered
    if (char == "+" && lexer.width() === 1) {
      lexer.emit(lunr.QueryLexer.PRESENCE)
      return lunr.QueryLexer.lexText
    }

    // "-" indicates term presence is prohibited
    // checking for length to ensure that only
    // leading "-" are considered
    if (char == "-" && lexer.width() === 1) {
      lexer.emit(lunr.QueryLexer.PRESENCE)
      return lunr.QueryLexer.lexText
    }

    if (char.match(lunr.QueryLexer.termSeparator)) {
      return lunr.QueryLexer.lexTerm
    }
  }
}

// State-machine parser that turns the lexer's lexemes into query clauses.
lunr.QueryParser = function (str, query) {
  this.lexer = new lunr.QueryLexer (str)
  this.query = query
  this.currentClause = {}
  this.lexemeIdx = 0
}

lunr.QueryParser.prototype.parse = function () {
  this.lexer.run()
  this.lexemes = this.lexer.lexemes

  // Each parse* function returns the next state, or undefined when the
  // lexeme stream is exhausted.
  var state = lunr.QueryParser.parseClause

  while (state) {
    state = state(this)
  }

  return this.query
}

lunr.QueryParser.prototype.peekLexeme = function () {
  return this.lexemes[this.lexemeIdx]
}

lunr.QueryParser.prototype.consumeLexeme = function () {
  var lexeme = this.peekLexeme()
  this.lexemeIdx += 1
  return lexeme
}

// Pushes the clause built so far onto the query and starts a fresh one.
lunr.QueryParser.prototype.nextClause = function () {
  var completedClause = this.currentClause

this.query.clause(completedClause) + this.currentClause = {} +} + +lunr.QueryParser.parseClause = function (parser) { + var lexeme = parser.peekLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.type) { + case lunr.QueryLexer.PRESENCE: + return lunr.QueryParser.parsePresence + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expected either a field or a term, found " + lexeme.type + + if (lexeme.str.length >= 1) { + errorMessage += " with value '" + lexeme.str + "'" + } + + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } +} + +lunr.QueryParser.parsePresence = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + switch (lexeme.str) { + case "-": + parser.currentClause.presence = lunr.Query.presence.PROHIBITED + break + case "+": + parser.currentClause.presence = lunr.Query.presence.REQUIRED + break + default: + var errorMessage = "unrecognised presence operator'" + lexeme.str + "'" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + var nextLexeme = parser.peekLexeme() + + if (nextLexeme == undefined) { + var errorMessage = "expecting term or field, found nothing" + throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end) + } + + switch (nextLexeme.type) { + case lunr.QueryLexer.FIELD: + return lunr.QueryParser.parseField + case lunr.QueryLexer.TERM: + return lunr.QueryParser.parseTerm + default: + var errorMessage = "expecting term or field, found '" + nextLexeme.type + "'" + throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end) + } +} + +lunr.QueryParser.parseField = function (parser) { + var lexeme = parser.consumeLexeme() + + if (lexeme == undefined) { + return + } + + if (parser.query.allFields.indexOf(lexeme.str) == -1) { + var possibleFields = 
parser.query.allFields.map(function (f) { return "'" + f + "'" }).join(', '),
        errorMessage = "unrecognised field '" + lexeme.str + "', possible fields: " + possibleFields

    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.fields = [lexeme.str]

  var nextLexeme = parser.peekLexeme()

  // A field prefix is only meaningful when a term follows it.
  if (nextLexeme == undefined) {
    var errorMessage = "expecting term, found nothing"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      return lunr.QueryParser.parseTerm
    default:
      var errorMessage = "expecting term, found '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}

// Records a search term on the current clause and decides which state
// handles the following lexeme (modifiers extend this clause; anything
// else completes it via nextClause).
lunr.QueryParser.parseTerm = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  // Terms are lower-cased to match the index's case folding.
  parser.currentClause.term = lexeme.str.toLowerCase()

  // Wildcard terms skip the search pipeline: a stemmed term would not
  // line up with the raw prefix/suffix the user typed.
  if (lexeme.str.indexOf("*") != -1) {
    parser.currentClause.usePipeline = false
  }

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}

// Parses the numeric fuzziness value that follows "~".
lunr.QueryParser.parseEditDistance = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  var editDistance = parseInt(lexeme.str, 10)

  if 
(isNaN(editDistance)) {
    var errorMessage = "edit distance must be numeric"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.editDistance = editDistance

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  // Same continuation logic as parseTerm: modifiers extend the clause,
  // anything else completes it.
  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, nextLexeme.start, nextLexeme.end)
  }
}

// Parses the numeric boost value that follows "^".
lunr.QueryParser.parseBoost = function (parser) {
  var lexeme = parser.consumeLexeme()

  if (lexeme == undefined) {
    return
  }

  var boost = parseInt(lexeme.str, 10)

  if (isNaN(boost)) {
    var errorMessage = "boost must be numeric"
    throw new lunr.QueryParseError (errorMessage, lexeme.start, lexeme.end)
  }

  parser.currentClause.boost = boost

  var nextLexeme = parser.peekLexeme()

  if (nextLexeme == undefined) {
    parser.nextClause()
    return
  }

  switch (nextLexeme.type) {
    case lunr.QueryLexer.TERM:
      parser.nextClause()
      return lunr.QueryParser.parseTerm
    case lunr.QueryLexer.FIELD:
      parser.nextClause()
      return lunr.QueryParser.parseField
    case lunr.QueryLexer.EDIT_DISTANCE:
      return lunr.QueryParser.parseEditDistance
    case lunr.QueryLexer.BOOST:
      return lunr.QueryParser.parseBoost
    case lunr.QueryLexer.PRESENCE:
      parser.nextClause()
      return lunr.QueryParser.parsePresence
    default:
      var errorMessage = "Unexpected lexeme type '" + nextLexeme.type + "'"
      throw new lunr.QueryParseError (errorMessage, 
nextLexeme.start, nextLexeme.end)
  }
}

  /**
   * export the module via AMD, CommonJS or as a browser global
   * Export code from https://github.com/umdjs/umd/blob/master/returnExports.js
   */
  ;(function (root, factory) {
    if (typeof define === 'function' && define.amd) {
      // AMD. Register as an anonymous module.
      define(factory)
    } else if (typeof exports === 'object') {
      /**
       * Node. Does not work with strict CommonJS, but
       * only CommonJS-like environments that support module.exports,
       * like Node.
       */
      module.exports = factory()
    } else {
      // Browser globals (root is window)
      root.lunr = factory()
    }
  }(this, function () {
    /**
     * Just return a value to define the module export.
     * This example returns an object, but the module
     * can return a function as the exported value.
     */
    return lunr
  }))
})();
diff --git a/search/main.js b/search/main.js
new file mode 100644
index 0000000..a5e469d
--- /dev/null
+++ b/search/main.js
@@ -0,0 +1,109 @@
// Returns the value of the "q" query-string parameter from the current
// URL, translating "+" (form encoding) to a space before percent-decoding.
// Returns undefined when no "q" parameter is present.
function getSearchTermFromLocation() {
  var sPageURL = window.location.search.substring(1);
  var sURLVariables = sPageURL.split('&');
  for (var i = 0; i < sURLVariables.length; i++) {
    var sParameterName = sURLVariables[i].split('=');
    if (sParameterName[0] == 'q') {
      return decodeURIComponent(sParameterName[1].replace(/\+/g, '%20'));
    }
  }
}

// Joins a base URL and a path without producing duplicate slashes.
function joinUrl (base, path) {
  if (path.substring(0, 1) === "/") {
    // path starts with `/`. Thus it is absolute. 
+ return path; + } + if (base.substring(base.length-1) === "/") { + // base ends with `/` + return base + path; + } + return base + "/" + path; +} + +function escapeHtml (value) { + return value.replace(/&/g, '&') + .replace(/"/g, '"') + .replace(//g, '>'); +} + +function formatResult (location, title, summary) { + return ''; +} + +function displayResults (results) { + var search_results = document.getElementById("mkdocs-search-results"); + while (search_results.firstChild) { + search_results.removeChild(search_results.firstChild); + } + if (results.length > 0){ + for (var i=0; i < results.length; i++){ + var result = results[i]; + var html = formatResult(result.location, result.title, result.summary); + search_results.insertAdjacentHTML('beforeend', html); + } + } else { + var noResultsText = search_results.getAttribute('data-no-results-text'); + if (!noResultsText) { + noResultsText = "No results found"; + } + search_results.insertAdjacentHTML('beforeend', '

' + noResultsText + '

');
    }
}

// Runs a search for the current input value. Queries at or below the
// configured minimum length just clear the result list. Dispatches to the
// web worker when available, otherwise searches on the main thread.
// NOTE(review): relies on globals `min_search_length` (set in
// onWorkerMessage) and, in the no-worker path, a global `search` function
// from worker.js — confirm both are defined before first keyup.
function doSearch () {
  var query = document.getElementById('mkdocs-search-query').value;
  if (query.length > min_search_length) {
    if (!window.Worker) {
      displayResults(search(query));
    } else {
      searchWorker.postMessage({query: query});
    }
  } else {
    // Clear results for short queries
    displayResults([]);
  }
}

// Wires up the search input and runs an initial search when the page URL
// carries a ?q= term (e.g. arriving from another page's search box).
function initSearch () {
  var search_input = document.getElementById('mkdocs-search-query');
  if (search_input) {
    search_input.addEventListener("keyup", doSearch);
  }
  var term = getSearchTermFromLocation();
  if (term) {
    search_input.value = term;
    doSearch();
  }
}

// Handles messages from the search worker (or the main-thread shim):
// readiness, result batches, and the index configuration.
function onWorkerMessage (e) {
  if (e.data.allowSearch) {
    initSearch();
  } else if (e.data.results) {
    var results = e.data.results;
    displayResults(results);
  } else if (e.data.config) {
    // Stored as (min-1) so doSearch's strict ">" behaves like ">=" the
    // configured minimum length.
    min_search_length = e.data.config.min_search_length-1;
  }
}

if (!window.Worker) {
  console.log('Web Worker API not supported');
  // load index in main thread
  // NOTE(review): this fallback uses jQuery's $.getScript — presumably
  // jQuery is loaded by the page template; verify for no-worker browsers.
  $.getScript(joinUrl(base_url, "search/worker.js")).done(function () {
    console.log('Loaded worker');
    init();
    // Shim postMessage so worker.js can "reply" to the main thread.
    window.postMessage = function (msg) {
      onWorkerMessage({data: msg});
    };
  }).fail(function (jqxhr, settings, exception) {
    console.error('Could not load worker.js');
  });
} else {
  // Wrap search in a web worker
  var searchWorker = new Worker(joinUrl(base_url, "search/worker.js"));
  searchWorker.postMessage({init: true});
  searchWorker.onmessage = onWorkerMessage;
}
diff --git a/search/search_index.json b/search/search_index.json
new file mode 100644
index 0000000..fa6af85
--- /dev/null
+++ b/search/search_index.json
@@ -0,0 +1 @@
{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"Spark Advanced Topics Working Group Documentation Welcome to the Spark Advanced Topics working group documentation. This documentation is in the early stages. 
We have been working on a flowchart to help you solve your current problems. The documentation is collected under \"details\" (see above). Other resources Some other resources that may be useful include High Performance Spark by Holden Karau and Rachel Warren (note: some bias as a co-author), as well as the excellent on-line The Internals of Apache Spark and The Internals of Spark SQL by Jacek Laskowski.","title":"Spark Advanced Topics Working Group Documentation"},{"location":"#spark-advanced-topics-working-group-documentation","text":"Welcome to the Spark Advanced Topics working group documentation. This documentation is in the early stages. We have been working on a flowchart to help you solve your current problems. The documentation is collected under \"details\" (see above).","title":"Spark Advanced Topics Working Group Documentation"},{"location":"#other-resources","text":"Some other resources that may be useful include High Performance Spark by Holden Karau and Rachel Warren (note: some bias as a co-author), as well as the excellent on-line The Internals of Apache Spark and The Internals of Spark SQL by Jacek Laskowski.","title":"Other resources"},{"location":"details/best-pratice-collect/","text":"Bringing too much data back to the driver (collect and friends) A common anti-pattern in Apache Spark is using collect() and then processing records on the driver. There are a few different reasons why folks tend to do this and we can work through some alternatives: Label items in ascending order ZipWithIndex Index items in order Compute the size of each partition use this to assign indexes. In order processing Compute a partition at a time (this is annoying to do, sorry). Writing out to a format not supported by Spark Use foreachPartition or implement your own DataSink. Need to aggregate everything into a single record Call reduce or treeReduce Sometimes you do really need to bring the data back to the driver for some reason (e.g., updating model weights). 
In those cases, especially if you process the data sequentially, you can limit the amount of data coming back to the driver at one time. toLocalIterator gives you back an iterator which will only need to fetch a partition at a time (although in Python this may be pipeline for efficency). By default toLocalIterator will launch a Spark job for each partition, so if you know you will eventually need all of the data it makes sense to do a persist + a count (async or otherwise) so you don't block as long between partitions. This doesn't mean every call to collect() is bad, if the amount of data being returned is under ~1gb it's probably OK although it will limit parallelism.","title":"Bringing too much data back to the driver (collect and friends)"},{"location":"details/best-pratice-collect/#bringing-too-much-data-back-to-the-driver-collect-and-friends","text":"A common anti-pattern in Apache Spark is using collect() and then processing records on the driver. There are a few different reasons why folks tend to do this and we can work through some alternatives: Label items in ascending order ZipWithIndex Index items in order Compute the size of each partition use this to assign indexes. In order processing Compute a partition at a time (this is annoying to do, sorry). Writing out to a format not supported by Spark Use foreachPartition or implement your own DataSink. Need to aggregate everything into a single record Call reduce or treeReduce Sometimes you do really need to bring the data back to the driver for some reason (e.g., updating model weights). In those cases, especially if you process the data sequentially, you can limit the amount of data coming back to the driver at one time. toLocalIterator gives you back an iterator which will only need to fetch a partition at a time (although in Python this may be pipeline for efficency). 
By default toLocalIterator will launch a Spark job for each partition, so if you know you will eventually need all of the data it makes sense to do a persist + a count (async or otherwise) so you don't block as long between partitions. This doesn't mean every call to collect() is bad, if the amount of data being returned is under ~1gb it's probably OK although it will limit parallelism.","title":"Bringing too much data back to the driver (collect and friends)"},{"location":"details/big-broadcast-join/","text":"Too big broadcast joins Beware that broadcast joins put unnecessary pressure on the driver. Before the tables are broadcasted to all the executors, the data is brought back to the driver and then broadcasted to executors. So you might run into driver OOMs. Broadcast smaller tables but this is usually recommended for < 10 Mb tables. Although that is mostly the default, we can comfortably broadcast much larger datasets as long as they fit in the executor and driver memories. Remember if there are multiple broadcast joins in the same stage, you need to have enough room for all those datasets in memory. You can configure the broadcast threshold using spark.sql.autoBroadcastJoinThreshold or increase the driver memory by setting spark.driver.memory to a higher value Make sure that you need more memory on your driver than the sum of all your broadcasted data in any stage plus all the other overheads that the driver deals with!","title":"Too big broadcast joins"},{"location":"details/big-broadcast-join/#too-big-broadcast-joins","text":"Beware that broadcast joins put unnecessary pressure on the driver. Before the tables are broadcasted to all the executors, the data is brought back to the driver and then broadcasted to executors. So you might run into driver OOMs. Broadcast smaller tables but this is usually recommended for < 10 Mb tables. 
Although that is mostly the default, we can comfortably broadcast much larger datasets as long as they fit in the executor and driver memories. Remember if there are multiple broadcast joins in the same stage, you need to have enough room for all those datasets in memory. You can configure the broadcast threshold using spark.sql.autoBroadcastJoinThreshold or increase the driver memory by setting spark.driver.memory to a higher value Make sure that you need more memory on your driver than the sum of all your broadcasted data in any stage plus all the other overheads that the driver deals with!","title":"Too big broadcast joins"},{"location":"details/broadcast-with-disable/","text":"Tables getting broadcasted even when broadcast is disabled You expect the broadcast to stop after you disable the broadcast threshold, by setting spark.sql.autoBroadcastJoinThreshold to -1, but Spark tries to broadcast the bigger table and fails with a broadcast error. And you observe that the query plan has BroadcastNestedLoopJoin in the physical plan. Check for sub queries in your code using NOT IN Example : select * from TableA where id not in (select id from TableB) This typically results in a forced BroadcastNestedLoopJoin even when the broadcast setting is disabled. 
If the data being processed is large enough, this results in broadcast errors when Spark attempts to broadcast the table Rewrite query using not exists or a regular LEFT JOIN instead of not in Example: select * from TableA where not exists (select 1 from TableB where TableA.id = TableB.id) The query will use SortMergeJoin and will resolve any Driver memory errors because of forced broadcasts Relevant links External Resource","title":"Tables getting broadcasted even when broadcast is disabled"},{"location":"details/broadcast-with-disable/#tables-getting-broadcasted-even-when-broadcast-is-disabled","text":"You expect the broadcast to stop after you disable the broadcast threshold, by setting spark.sql.autoBroadcastJoinThreshold to -1, but Spark tries to broadcast the bigger table and fails with a broadcast error. And you observe that the query plan has BroadcastNestedLoopJoin in the physical plan. Check for sub queries in your code using NOT IN Example : select * from TableA where id not in (select id from TableB) This typically results in a forced BroadcastNestedLoopJoin even when the broadcast setting is disabled. If the data being processed is large enough, this results in broadcast errors when Spark attempts to broadcast the table Rewrite query using not exists or a regular LEFT JOIN instead of not in Example: select * from TableA where not exists (select 1 from TableB where TableA.id = TableB.id) The query will use SortMergeJoin and will resolve any Driver memory errors because of forced broadcasts","title":"Tables getting broadcasted even when broadcast is disabled"},{"location":"details/broadcast-with-disable/#relevant-links","text":"External Resource","title":"Relevant links"},{"location":"details/class-or-method-not-found/","text":"Class or method not found When your compile-time class path differs from the runtime class path, you may encounter errors that signal that a class or method could not be found (e.g., NoClassDefFoundError, NoSuchMethodError). 
java.lang.NoSuchMethodError: com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.subTypeValidator(Lcom/fasterxml/jackson/databind/cfg/MapperConfig;)Lcom/fasterxml/jackson/databind/jsontype/PolymorphicTypeValidator; at com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.buildTypeDeserializer(AvroTypeResolverBuilder.java:43) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findTypeDeserializer(BasicDeserializerFactory.java:1598) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findPropertyContentTypeDeserializer(BasicDeserializerFactory.java:1766) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.resolveMemberAndTypeAnnotations(BasicDeserializerFactory.java:2092) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.constructCreatorProperty(BasicDeserializerFactory.java:1069) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addExplicitPropertyCreator(BasicDeserializerFactory.java:703) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addDeserializerConstructors(BasicDeserializerFactory.java:476) ... This may be due to packaging a fat JAR with dependency versions that are in conflict with those provided by the Spark environment. When there are multiple versions of the same library in the runtime class path under the same package, Java's class loader hierarchy kicks in, which can lead to unintended behaviors. There are a few options to get around this. Identify the version of the problematic library within your Spark environment and pin the dependency to that version in your build file. To identify the version used in your Spark environment, in the Spark UI go to the Environment tab, scroll down to Classpath Entries, and find the corresponding library. Exclude the transient dependency of the problematic library from imported libraries in your build file. Shade the problematic library under a different package. 
If options (1) and (2) result in more dependency conflicts, it may be that the version of the problematic library in the Spark environment is incompatible with your application code. Therefore, it makes sense to shade the problematic library so that your application can run with a version of the library isolated from the rest of the Spark environment. If you are using the shadow plugin in Gradle, you can shade using: shadowJar { ... relocate 'com.fasterxml.jackson', 'shaded.fasterxml.jackson' } In this example, Jackson libraries used by your application will be available in the shaded.fasterxml.jackson package at runtime.","title":"Class or method not found"},{"location":"details/class-or-method-not-found/#class-or-method-not-found","text":"When your compile-time class path differs from the runtime class path, you may encounter errors that signal that a class or method could not be found (e.g., NoClassDefFoundError, NoSuchMethodError). java.lang.NoSuchMethodError: com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.subTypeValidator(Lcom/fasterxml/jackson/databind/cfg/MapperConfig;)Lcom/fasterxml/jackson/databind/jsontype/PolymorphicTypeValidator; at com.fasterxml.jackson.dataformat.avro.AvroTypeResolverBuilder.buildTypeDeserializer(AvroTypeResolverBuilder.java:43) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findTypeDeserializer(BasicDeserializerFactory.java:1598) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.findPropertyContentTypeDeserializer(BasicDeserializerFactory.java:1766) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.resolveMemberAndTypeAnnotations(BasicDeserializerFactory.java:2092) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory.constructCreatorProperty(BasicDeserializerFactory.java:1069) at com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addExplicitPropertyCreator(BasicDeserializerFactory.java:703) at 
com.fasterxml.jackson.databind.deser.BasicDeserializerFactory._addDeserializerConstructors(BasicDeserializerFactory.java:476) ... This may be due to packaging a fat JAR with dependency versions that are in conflict with those provided by the Spark environment. When there are multiple versions of the same library in the runtime class path under the same package, Java's class loader hierarchy kicks in, which can lead to unintended behaviors. There are a few options to get around this. Identify the version of the problematic library within your Spark environment and pin the dependency to that version in your build file. To identify the version used in your Spark environment, in the Spark UI go to the Environment tab, scroll down to Classpath Entries, and find the corresponding library. Exclude the transient dependency of the problematic library from imported libraries in your build file. Shade the problematic library under a different package. If options (1) and (2) result in more dependency conflicts, it may be that the version of the problematic library in the Spark environment is incompatible with your application code. Therefore, it makes sense to shade the problematic library so that your application can run with a version of the library isolated from the rest of the Spark environment. If you are using the shadow plugin in Gradle, you can shade using: shadowJar { ... relocate 'com.fasterxml.jackson', 'shaded.fasterxml.jackson' } In this example, Jackson libraries used by your application will be available in the shaded.fasterxml.jackson package at runtime.","title":"Class or method not found"},{"location":"details/container-oom/","text":"Container OOMs Container OOMs can be difficult to debug as the container running the problematic code is killed, and sometimes not all of the log information is available. Non-JVM language users (such as Python) are most likely to encounter issues with container OOMs. 
This is because the JVM is generally configured to not use more memory than the container it is running in. Everything which isn't inside the JVM is considered \"overhead\", so Tensorflow, Python, bash, etc. A first step with a container OOM is often increasing spark.executor.memoryOverhead and spark.driver.memoryOverhead to leave more memory for non-Java processes. Python users can set spark.executor.pyspark.memory to limit the Python VM to a certain amount of memory. This amount of memory is then added to the overhead. Python users performing aggregations in Python should also check out the PyUDFOOM page .","title":"Container OOMs"},{"location":"details/container-oom/#container-ooms","text":"Container OOMs can be difficult to debug as the container running the problematic code is killed, and sometimes not all of the log information is available. Non-JVM language users (such as Python) are most likely to encounter issues with container OOMs. This is because the JVM is generally configured to not use more memory than the container it is running in. Everything which isn't inside the JVM is considered \"overhead\", so Tensorflow, Python, bash, etc. A first step with a container OOM is often increasing spark.executor.memoryOverhead and spark.driver.memoryOverhead to leave more memory for non-Java processes. Python users can set spark.executor.pyspark.memory to limit the Python VM to a certain amount of memory. This amount of memory is then added to the overhead. Python users performing aggregations in Python should also check out the PyUDFOOM page .","title":"Container OOMs"},{"location":"details/correlated-column-not-allowed/","text":"spark.sql.AnalysisException: Correlated column is not allowed in predicate SPARK-35080 introduces a check for correlated subqueries with aggregates which may have previously returned incorrect results. Instead, starting in Spark 2.4.8, these queries will raise an org.apache.spark.sql.AnalysisException exception. 
One of the examples of this ( from the JIRA ) is: create or replace view t1(c) as values ('a'), ('b'); create or replace view t2(c) as values ('ab'), ('abc'), ('bc'); select c, (select count(*) from t2 where t1.c = substring(t2.c, 1, 1)) from t1; Instead you should do an explicit join and then perform your aggregation: create or replace view t1(c) as values ('a'), ('b'); create or replace view t2(c) as values ('ab'), ('abc'), ('bc'); create or replace view t3 as select t1.c from t2 INNER JOIN t1 ON t1.c = substring(t2.c, 1, 1); select c, count(*) from t3 group by c; Similarly: create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3); create or replace view t2(c) as values (6); select c, (select count(*) from t1 where a + b = c) from t2; Can be rewritten as: create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3); create or replace view t2(c) as values (6); create or replace view t3 as select t2.c from t2 INNER JOIN t1 ON t2.c = t1.a + t1.b; select c, count(*) from t3 group by c; Likewise in Scala and Python use an explicit .join and then perform your aggregation on the joined result. Now Spark can compute correct results thus avoiding the exception. Relevant links: SPARK-35080 JIRA Stackoverflow discussion for PySpark workaround of Correlated Column","title":"spark.sql.AnalysisException: Correlated column is not allowed in predicate"},{"location":"details/correlated-column-not-allowed/#sparksqlanalysisexception-correlated-column-is-not-allowed-in-predicate","text":"SPARK-35080 introduces a check for correlated subqueries with aggregates which may have previously return incorect results. Instead, starting in Spark 2.4.8, these queries will raise an org.apache.spark.sql.AnalysisException exception. 
One of the examples of this ( from the JIRA ) is: create or replace view t1(c) as values ('a'), ('b'); create or replace view t2(c) as values ('ab'), ('abc'), ('bc'); select c, (select count(*) from t2 where t1.c = substring(t2.c, 1, 1)) from t1; Instead you should do an explicit join and then perform your aggregation: create or replace view t1(c) as values ('a'), ('b'); create or replace view t2(c) as values ('ab'), ('abc'), ('bc'); create or replace view t3 as select t1.c from t2 INNER JOIN t1 ON t1.c = substring(t2.c, 1, 1); select c, count(*) from t3 group by c; Similarly: create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3); create or replace view t2(c) as values (6); select c, (select count(*) from t1 where a + b = c) from t2; Can be rewritten as: create or replace view t1(a, b) as values (0, 6), (1, 5), (2, 4), (3, 3); create or replace view t2(c) as values (6); create or replace view t3 as select t2.c from t2 INNER JOIN t1 ON t2.c = t1.a + t1.b; select c, count(*) from t3 group by c; Likewise in Scala and Python use an explicit .join and then perform your aggregation on the joined result. Now Spark can compute correct results thus avoiding the exception.","title":"spark.sql.AnalysisException: Correlated column is not allowed in predicate"},{"location":"details/correlated-column-not-allowed/#relevant-links","text":"SPARK-35080 JIRA Stackoverflow discussion for PySpark workaround of Correlated Column","title":"Relevant links:"},{"location":"details/driver-max-result-size/","text":"Result size larger than spark.driver.maxResultSize error OR Kryo serialization failed: Buffer overflow. ex: You typically run into this error for one of the following reasons. You are sending a large result set to the driver using SELECT (in SQL) or COLLECT (in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high number of rows. 
Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters. You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So if you are broadcasting a table that is larger than spark.driver.maxResultSize , you will run into this error. You can overcome this by either increasing the spark.driver.maxResultSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. a. You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultSize . The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a subsequent stage or non-uniform output files etc..). If using the latter option, be sure spark.driver.maxResultSize is less than spark.driver.memory . b. You have too many Spark partitions from the previous stage: In this case, you have a large number of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the number of rows(partitions*sampleSize) could be larger than spark.driver.maxResultSize . 
A recommended way to resolve this issue is by combining the splits for the table(increase spark.(path).(db).(table).target-size ) with high map tasks. Note that having a large number of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions. External resources: - Apache Spark job fails with maxResultSize exception","title":"Result size larger than spark.driver.maxResultSize error OR Kryo serialization failed: Buffer overflow."},{"location":"details/driver-max-result-size/#result-size-larger-than-sparkdrivermaxresultsize-error-or-kryo-serialization-failed-buffer-overflow","text":"ex: You typically run into this error for one of the following reasons. You are sending a large result set to the driver using SELECT (in SQL) or COLLECT (in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high number of rows. Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters. You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So iff you are broadcasting a table that is larger than spark.driver.maxResultSize , you will run into this error. You can overcome this by either increasing the spark.driver.maxResultSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves in collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. a. 
You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultSize . The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a sub-sequent stage or non-uniform output files etc..). If using the later option, be sure spark.driver.maxResultSize is less than spark.driver.memory . b. You have too many Spark partitions from the previous stage: In this case, you have a large number of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the number of rows(partitions*sampleSize) could be larger than spark.driver.maxResultSize . A recommended way to resolve this issue is by combining the splits for the table(increase spark.(path).(db).(table).target-size ) with high map tasks. Note that having a large number of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions. External resources: - Apache Spark job fails with maxResultSize exception","title":"Result size larger than spark.driver.maxResultSize error OR Kryo serialization failed: Buffer overflow."},{"location":"details/error-driver-max-result-size/","text":"Result size larger than spark.driver.maxResultsSize error ex: You typically run into this error for one of the following reasons. You are sending a large result set to the driver using SELECT (in SQL) or COLLECT (in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high no.of rows. 
Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters. ( Collect best practices ) You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So iff you are broadcasting a table that is larger than spark.driver.maxResultsSize , you will run into this error. You can overcome this by either increasing the spark.driver.maxResultsSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join. Note that Spark broadcasts a table referenced in a join if the size of the table is less than spark.sql.autoBroadcastJoinThreshold (100 MB by default at Netflix). You can change this config to include a larger tables in broadcast or reduce the threshold if you want to exclude certain tables. You can also set this to -1 if you want to disable broadcast joins. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves in collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. a. You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultsSize . The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultsSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a subsequent stage or non-uniform output files etc.) b. 
You have too many Spark partitions from the previous stage: In this case, you have a large no.of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the no.of rows(partitions*sampleSize) could be larger than spark.driver.maxResultsSize . A recommended way to resolve this issue is by combining the splits for the table(increase spark.netflix.(db).(table).target-size ) with high map tasks. Note that having a large no.of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions. Broadcast join related articles SQL Broadcast Join Hints Tables getting broadcasted even when broadcast is disabled","title":"Result size larger than spark.driver.maxResultsSize error"},{"location":"details/error-driver-max-result-size/#result-size-larger-than-sparkdrivermaxresultssize-error","text":"ex: You typically run into this error for one of the following reasons. You are sending a large result set to the driver using SELECT (in SQL) or COLLECT (in dataframes/dataset/RDD): Apply a limit if your intention is to spot check a few rows as you won't be able to go through full set of rows if you have a really high no.of rows. Writing the results to a temporary table in your schema and querying the new table would be an alternative if you need to query the results multiple times with a specific set of filters. ( Collect best practices ) You are broadcasting a table that is too big. Spark downloads all the rows for a table that needs to be broadcasted to the driver before it starts shipping to the executors. So iff you are broadcasting a table that is larger than spark.driver.maxResultsSize , you will run into this error. You can overcome this by either increasing the spark.driver.maxResultsSize or not broadcasting the table so Spark would use a shuffle hash or sort-merge join. 
Note that Spark broadcasts a table referenced in a join if the size of the table is less than spark.sql.autoBroadcastJoinThreshold (100 MB by default at Netflix). You can change this config to include a larger tables in broadcast or reduce the threshold if you want to exclude certain tables. You can also set this to -1 if you want to disable broadcast joins. You have a sort in your SQL/Dataframe: Spark internally uses range-partitioning to assign sort keys to a partition range. This involves in collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. This error can further fall into one of the below scenarios. a. You have wide/bloated rows in your table: In this case, you are not sending a lot of rows to the driver, but you are sending bytes larger than the spark.driver.maxResultsSize . The recommendation here is to lower the default sample size by setting the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to something lower than 20. You can also increase spark.driver.maxResultsSize if lowering the sample size is causing an imbalance in partition ranges(for ex: skew in a subsequent stage or non-uniform output files etc.) b. You have too many Spark partitions from the previous stage: In this case, you have a large no.of map tasks while reading from a table. Since spark has to collect sample rows from every partition, your total bytes from the no.of rows(partitions*sampleSize) could be larger than spark.driver.maxResultsSize . A recommended way to resolve this issue is by combining the splits for the table(increase spark.netflix.(db).(table).target-size ) with high map tasks. 
Note that having a large number of map tasks(>80k) will cause other OOM issues on driver as it needs to keep track of metadata for all these tasks/partitions.","title":"Result size larger than spark.driver.maxResultsSize error"},{"location":"details/error-driver-max-result-size/#broadcast-join-related-articles","text":"SQL Broadcast Join Hints Tables getting broadcasted even when broadcast is disabled","title":"Broadcast join related articles"},{"location":"details/error-driver-out-of-memory/","text":"Driver ran out of memory If you see java.lang.OutOfMemoryError: in the driver log/stderr, it is most likely from driver JVM running out of memory. This article has the memory config for increasing the driver memory. One reason you could run into this error is if you are reading from a table with too many splits(s3 files) and overwhelming the driver with a lot of metadata. Another cause for driver out of memory errors is when the number of partitions is too high and you trigger a sort or shuffle where Spark samples the data, but then runs out of memory while collecting the sample. To solve this repartition to a lower number of partitions or if you're in RDDs coalesce is a more efficient option (in DataFrames coalesce can have impact upstream in the query plan). A less common, but still semi-frequent, occurrence of driver out of memory is an excessive number of tasks in the UI. This can be controlled by reducing spark.ui.retainedTasks (default 100k).","title":"Driver ran out of memory"},{"location":"details/error-driver-out-of-memory/#driver-ran-out-of-memory","text":"If you see java.lang.OutOfMemoryError: in the driver log/stderr, it is most likely from driver JVM running out of memory. This article has the memory config for increasing the driver memory. One reason you could run into this error is if you are reading from a table with too many splits(s3 files) and overwhelming the driver with a lot of metadata. 
Another cause for driver out of memory errors is when the number of partitions is too high and you trigger a sort or shuffle where Spark samples the data, but then runs out of memory while collecting the sample. To solve this repartition to a lower number of partitions or if you're in RDDs coalesce is a more efficent option (in DataFrames coalesce can have impact upstream in the query plan). A less common, but still semi-frequent, occurnce of driver out of memory is an excessive number of tasks in the UI. This can be controlled by reducing spark.ui.retainedTasks (default 100k).","title":"Driver ran out of memory"},{"location":"details/error-driver-stack-overflow/","text":"Driver ran out of memory Note that it is very rare to run into this error. You may see this error when you are using too many filters(in your sql/dataframe/dataset). Workaround is to increase spark driver JVM stack size by setting below config to something higher than the default spark.driver.extraJavaOptions: \"-Xss512M\" #Sets the stack size to 512 MB","title":"Driver ran out of memory"},{"location":"details/error-driver-stack-overflow/#driver-ran-out-of-memory","text":"Note that it is very rare to run into this error. You may see this error when you are using too many filters(in your sql/dataframe/dataset). Workaround is to increase spark driver JVM stack size by setting below config to something higher than the default spark.driver.extraJavaOptions: \"-Xss512M\" #Sets the stack size to 512 MB","title":"Driver ran out of memory"},{"location":"details/error-executor-out-of-disk/","text":"Executor out of disk error By far the most common cause of executor out of disk errors is a mis-configuration of Spark's temporary directories. You should set spark.local.dir to a directory with lots of local storage available. If you are on YARN this will be overriden by LOCAL_DIRS environment variable on the workers. Kubernetes users may wish to add a large emptyDir for Spark to use for temporary storage. 
Another common cause is having no longer needed/used RDDs/DataFrames/Datasets in scope. This tends to happen more often with notebooks as more things are placed in the global scope where they are not automatically cleaned up. A solution to this is breaking your code into more functions so that things go out of scope, or explicitly setting no longer needed RDDs/DataFrames/Datasets to None/null. On the other hand if you have an iterative algorithm you should investigate if you may have too big of a DAG.","title":"Executor out of disk error"},{"location":"details/error-executor-out-of-disk/#executor-out-of-disk-error","text":"By far the most common cause of executor out of disk errors is a mis-configuration of Spark's temporary directories. You should set spark.local.dir to a directory with lots of local storage available. If you are on YARN this will be overridden by LOCAL_DIRS environment variable on the workers. Kubernetes users may wish to add a large emptyDir for Spark to use for temporary storage. Another common cause is having no longer needed/used RDDs/DataFrames/Datasets in scope. This tends to happen more often with notebooks as more things are placed in the global scope where they are not automatically cleaned up. A solution to this is breaking your code into more functions so that things go out of scope, or explicitly setting no longer needed RDDs/DataFrames/Datasets to None/null. On the other hand if you have an iterative algorithm you should investigate if you may have too big of a DAG.","title":"Executor out of disk error"},{"location":"details/error-executor-out-of-memory/","text":"Executor ran out of memory Executor out of memory issues can come from many sources. To narrow down the cause of the error, there are a few important places to look: the Spark Web UI, the executor log, the driver log, and (if applicable) the cluster manager (e.g. YARN/K8s) log/UI. 
Container OOM If the driver log indicates Container killed by YARN for exceeding memory limits for the applicable executor, or if (on K8s) the Spark UI shows the reason for the executor loss as \"OOMKill\" / exit code 137 then it's likely your program is exceeding the amount of memory assigned to it. This doesn't normally happen with pure JVM code, but instead when calling PySpark or JNI libraries (or using off-heap storage). PySpark users are the most likely to encounter container OOMs. If you have PySpark UDF in the stage you should check out Python UDF OOM to eliminate that potential cause. Another potential issue to investigate is if you have key skew as trying to load too large a partition in Python can result in an OOM. If you are using a library, like Tensorflow, which results in","title":"Executor ran out of memory"},{"location":"details/error-executor-out-of-memory/#executor-ran-out-of-memory","text":"Executor out of memory issues can come from many sources. To narrow down the cause of the error, there are a few important places to look: the Spark Web UI, the executor log, the driver log, and (if applicable) the cluster manager (e.g. YARN/K8s) log/UI.","title":"Executor ran out of memory"},{"location":"details/error-executor-out-of-memory/#container-oom","text":"If the driver log indicates Container killed by YARN for exceeding memory limits for the applicable executor, or if (on K8s) the Spark UI shows the reason for the executor loss as \"OOMKill\" / exit code 137 then it's likely your program is exceeding the amount of memory assigned to it. This doesn't normally happen with pure JVM code, but instead when calling PySpark or JNI libraries (or using off-heap storage). PySpark users are the most likely to encounter container OOMs. If you have PySpark UDF in the stage you should check out Python UDF OOM to eliminate that potential cause. 
Another potential issue to investigate is if your have key skew as trying to load too large a partition in Python can result in an OOM. If you are using a library, like Tensorflow, which results in","title":"Container OOM"},{"location":"details/error-invalid-file/","text":"Missing Files / File Not Found / Reading past RLE/BitPacking stream Missing files are a relatively rare error in Spark. Most commonly they are caused by non-atomic operations in the data writer and will go away when you re-run your query/job. On the other hand Reading past RLE/BitPacking stream or other file read errors tend to be non-transient. If the error is not transient it may mean that the metadata store (e.g. hive or iceberg) are pointing to a file that does not exist or has a bad format. You can cleanup Iceberg tables using Iceberg Table Cleanup from holden's spark-misc-utils , but be careful and talk with whoever produced the table to make sure that it's ok. If you get a failed to read parquet file while you are not trying to read a parquet file, it's likely that you are using the wrong metastore .","title":"Missing Files / File Not Found / Reading past RLE/BitPacking stream"},{"location":"details/error-invalid-file/#missing-files-file-not-found-reading-past-rlebitpacking-stream","text":"Missing files are a relatively rare error in Spark. Most commonly they are caused by non-atomic operations in the data writer and will go away when you re-run your query/job. On the other hand Reading past RLE/BitPacking stream or other file read errors tend to be non-transient. If the error is not transient it may mean that the metadata store (e.g. hive or iceberg) are pointing to a file that does not exist or has a bad format. You can cleanup Iceberg tables using Iceberg Table Cleanup from holden's spark-misc-utils , but be careful and talk with whoever produced the table to make sure that it's ok. 
If you get a failed to read parquet file while you are not trying to read a parquet file, it's likely that you are using the wrong metastore .","title":"Missing Files / File Not Found / Reading past RLE/BitPacking stream"},{"location":"details/error-job/","text":"Error Most of the errors should fall into below 4 categories. Drill-down to individual sections to isolate your error/exception. SQL Analysis Exception Memory Error Shuffle Error Other Error","title":"Error"},{"location":"details/error-job/#error","text":"Most of the errors should fall into below 4 categories. Drill-down to individual sections to isolate your error/exception. SQL Analysis Exception Memory Error Shuffle Error Other Error","title":"Error"},{"location":"details/error-memory/","text":"Memory Errors Driver Spark driver ran out of memory maxResultSize exceeded stackOverflowError Executor Spark executor ran out of memory Executor out of disk error","title":"Memory Errors"},{"location":"details/error-memory/#memory-errors","text":"","title":"Memory Errors"},{"location":"details/error-memory/#driver","text":"","title":"Driver"},{"location":"details/error-memory/#spark-driver-ran-out-of-memory","text":"","title":"Spark driver ran out of memory"},{"location":"details/error-memory/#maxresultsize-exceeded","text":"","title":"maxResultSize exceeded"},{"location":"details/error-memory/#stackoverflowerror","text":"","title":"stackOverflowError"},{"location":"details/error-memory/#executor","text":"","title":"Executor"},{"location":"details/error-memory/#spark-executor-ran-out-of-memory","text":"","title":"Spark executor ran out of memory"},{"location":"details/error-memory/#executor-out-of-disk-error","text":"","title":"Executor out of disk error"},{"location":"details/error-other/","text":"Other errors Failed to read non-parquet file Executor Failure from large record Class or method not found Invalid/Missing Files Too Big DAG","title":"Other 
errors"},{"location":"details/error-other/#other-errors","text":"Failed to read non-parquet file Executor Failure from large record Class or method not found Invalid/Missing Files Too Big DAG","title":"Other errors"},{"location":"details/error-shuffle/","text":"Fetch Failed exceptions No time to read, help me now. FetchFailed exceptions are mainly due to misconfiguration of spark.sql.shuffle.partitions : Too few shuffle partitions: Having too few shuffle partitions means you could have a shuffle block that is larger than the limit(Integer.MaxValue=~2GB) or OOM(Exit code 143). The symptom for this can also be long-running tasks where the blocks are large but not reached the limit. A quick fix is to increase the shuffle/reducer parallelism by increasing spark.sql.shuffle.partitions (default is 200). Too many shuffle partitions: Too many shuffle partitions could put a stress on the shuffle service and could run into errors like network timeout ```. Note that the shuffle service is a shared service for all the jobs running on the cluster so it is possible that someone else's job with high shuffle activity could cause errors for your job. It is worth checking to see if there is a pattern of these failures for your job to confirm if it is an issue with your job or not. Also note that the higher the shuffle partitions, the more likely that you would see this issue. Tell me more. FetchFailed Exceptions can be bucketed into below 4 categories: Ran out of heap memory(OOM) on an Executor Ran out of overhead memory on an Executor Shuffle block greater than 2 GB Network TimeOut. Ran out of heap memory(OOM) on an Executor This error indicates that the executor hosting the shuffle block has crashed due to Java OOM. The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions . A workaround is to increase the shuffle partitions. Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. 
Please refer to key-skew for related workarounds. Errors that you normally see in the executor/task logs: ExecutorLostFailure due to Exit code 143 ExecutorLostFailure due to Executor Heartbeat timed out. Ran out of overhead memory on an Executor This error indicates that the executor hosting the shuffle block has crashed due to off-heap(overhead) memory. Increasing spark.yarn.executor.memoryOverhead should prevent this specific exception. Error that you normally see in the executor/task logs: ExecutorLostFailure, # GB of # GB physical memory used. Consider boosting the spark.yarn.executor.memoryOverhead Shuffle block greater than 2 GB The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions . A workaround is to increase the shuffle partitions(increases the no.of blocks and reduces the block size). Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. Please refer to key-skew for related workarounds. Error that you normally see in the executor/task logs: Too Large Frame Frame size exceeding Integer.MaxValue(~2GB) Network Timeout The most likely cause for this exception is a high shuffle activity(high network load) in your job. Reducing the shuffle partitions spark.sql.shuffle.partitions would mitigate this issue. You can also reduce the network load by modifying the shuffle config. 
(todo: add details) Error that you normally see in the executor/task logs: org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 0 org.apache.spark.shuffle.FetchFailedException: Failed to connect to ip-xxxxxxxx Caused by: org.apache.spark.shuffle.FetchFailedException: Too large frame: xxxxxxxxxxx","title":"Fetch Failed exceptions"},{"location":"details/error-shuffle/#fetch-failed-exceptions","text":"","title":"Fetch Failed exceptions"},{"location":"details/error-shuffle/#no-time-to-read-help-me-now","text":"FetchFailed exceptions are mainly due to misconfiguration of spark.sql.shuffle.partitions : Too few shuffle partitions: Having too few shuffle partitions means you could have a shuffle block that is larger than the limit(Integer.MaxValue=~2GB) or OOM(Exit code 143). The symptom for this can also be long-running tasks where the blocks are large but not reached the limit. A quick fix is to increase the shuffle/reducer parallelism by increasing spark.sql.shuffle.partitions (default is 200). Too many shuffle partitions: Too many shuffle partitions could put a stress on the shuffle service and could run into errors like network timeout ```. Note that the shuffle service is a shared service for all the jobs running on the cluster so it is possible that someone else's job with high shuffle activity could cause errors for your job. It is worth checking to see if there is a pattern of these failures for your job to confirm if it is an issue with your job or not. 
Also note that the higher the shuffle partitions, the more likely that you would see this issue.","title":"No time to read, help me now."},{"location":"details/error-shuffle/#tell-me-more","text":"FetchFailed Exceptions can be bucketed into below 4 categories: Ran out of heap memory(OOM) on an Executor Ran out of overhead memory on an Executor Shuffle block greater than 2 GB Network TimeOut.","title":"Tell me more."},{"location":"details/error-shuffle/#ran-out-of-heap-memoryoom-on-an-executor","text":"This error indicates that the executor hosting the shuffle block has crashed due to Java OOM. The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions . A workaround is to increase the shuffle partitions. Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. Please refer to key-skew for related workarounds. Errors that you normally see in the executor/task logs: ExecutorLostFailure due to Exit code 143 ExecutorLostFailure due to Executor Heartbeat timed out.","title":"Ran out of heap memory(OOM) on an Executor"},{"location":"details/error-shuffle/#ran-out-of-overhead-memory-on-an-executor","text":"This error indicates that the executor hosting the shuffle block has crashed due to off-heap(overhead) memory. Increasing spark.yarn.executor.memoryOverhead should prevent this specific exception. Error that you normally see in the executor/task logs: ExecutorLostFailure, # GB of # GB physical memory used. Consider boosting the spark.yarn.executor.memoryOverhead","title":"Ran out of overhead memory on an Executor"},{"location":"details/error-shuffle/#shuffle-block-greater-than-2-gb","text":"The most likely cause for this is misconfiguration of spark.sql.shuffle.partitions . A workaround is to increase the shuffle partitions(increases the no.of blocks and reduces the block size). Note that if you have skew from a single key(in join, group By), increasing this property wouldn't resolve the issue. 
Please refer to key-skew for related workarounds. Error that you normally see in the executor/task logs: Too Large Frame Frame size exceeding Integer.MaxValue(~2GB)","title":"Shuffle block greater than 2 GB"},{"location":"details/error-shuffle/#network-timeout","text":"The most likely cause for this exception is a high shuffle activity(high network load) in your job. Reducing the shuffle partitions spark.sql.shuffle.partitions would mitigate this issue. You can also reduce the network load by modifying the shuffle config. (todo: add details) Error that you normally see in the executor/task logs: org.apache.spark.shuffle.MetadataFetchFailedException: Missing an output location for shuffle 0 org.apache.spark.shuffle.FetchFailedException: Failed to connect to ip-xxxxxxxx Caused by: org.apache.spark.shuffle.FetchFailedException: Too large frame: xxxxxxxxxxx","title":"Network Timeout"},{"location":"details/error-sql-analysis/","text":"spark.sql.AnalysisException Spark SQL AnalysisException covers a wide variety of potential issues, ranging from ambiguous columns to more esoteric items like subquery issues. A good first step is making sure that your SQL is valid and your brackets are where you intend by putting your query through a SQL pretty-printer. After that hopefully the details of the AnalysisException error will guide you to one of the sub-nodes in the error graph. Known issues Correlated column is not allowed in predicate","title":"spark.sql.AnalysisException"},{"location":"details/error-sql-analysis/#sparksqlanalysisexception","text":"Spark SQL AnalysisException covers a wide variety of potential issues, ranging from ambiguous columns to more esoteric items like subquery issues. A good first step is making sure that your SQL is valid and your brackets are where you intend by putting your query through a SQL pretty-printer. 
After that hopefully the details of the AnalysisException error will guide you to one of the sub-nodes in the error graph.","title":"spark.sql.AnalysisException"},{"location":"details/error-sql-analysis/#known-issues","text":"Correlated column is not allowed in predicate","title":"Known issues"},{"location":"details/even_partitioning_still_slow/","text":"Even Partitioning Yet Still Slow To see if a stage is evenly partitioned take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. Sometimes a stage with even partitioning is still slow. There are a few common possible causes when the partitioning is even for slow stages. If your tasks are too short (e.g. finishing in under a few minutes), likely you have too many partitions/tasks. If your tasks are taking just the right amount of time but your jobs are slow you may not have enough executors. If your tasks are taking a long time you may have too large records, not enough partitions/tasks, or just slow functions. Another sign of not enough tasks can be excessive spill to disk. If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. Insufficient partitioning can be fixed by increasing the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions ). Another cause of too large partitioning can be non-splittable compression formats, like gzip, that can be worked around with tools like splittablegzip . Finally consider the possibility the records are too large.","title":"Even Partitioning Yet Still Slow"},{"location":"details/even_partitioning_still_slow/#even-partitioning-yet-still-slow","text":"To see if a stage is evenly partitioned take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. 
Sometimes a stage with even partitioning is still slow. There are a few common possible causes when the partitioning is even for slow stages. If your tasks are too short (e.g. finishing in under a few minutes), likely you have too many partitions/tasks. If your tasks are taking just the right amount of time but your jobs are slow you may not have enough executors. If your tasks are taking a long time you may have too large records, not enough partitions/tasks, or just slow functions. Another sign of not enough tasks can be excessive spill to disk. If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. Insufficient partitioning can be fixed by increasing the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions ). Another cause of too large partitioning can be non-splittable compression formats, like gzip, that can be worked around with tools like splittablegzip . Finally consider the possibility the records are too large.","title":"Even Partitioning Yet Still Slow"},{"location":"details/failed-to-read-non-parquet-file/","text":"Failed to read non-parquet file Iceberg does not perform validation on the files specified, so it will let you create a table pointing to non-supported formats, e.g. CSV data, but will fail at query time. In this case you need to use a different metastore (e.g. Hive ) If the data is stored in a supported format, it is also possible you have an invalid iceberg table.","title":"Failed to read non-parquet file"},{"location":"details/failed-to-read-non-parquet-file/#failed-to-read-non-parquet-file","text":"Iceberg does not perform validation on the files specified, so it will let you create a table pointing to non-supported formats, e.g. CSV data, but will fail at query time. In this case you need to use a different metastore (e.g. 
Hive ) If the data is stored in a supported format, it is also possible you have an invalid iceberg table.","title":"Failed to read non-parquet file"},{"location":"details/failure-executor-large-record/","text":"Large record problems can show up in a few different ways. For particularly large records you may find an executor out of memory exception, otherwise you may find slow stages. You can get a Kyro serialization (for SQL) or Java serialization error (for RDD). In addition if a given column in a row is too large you may encounter a IllegalArgumentException: Cannot grow BufferHolder by size, because the size after growing exceeds size limitation 2147483632 . Some common causes of too big records are groupByKey in RDD land, UDAFs or list aggregations (like collect_list ) in Spark SQL, highly compressed or Sparse records without a sparse seriaization. For sparse records check out AltEncoder in (spark-misc-utils)[https://github.com/holdenk/spark-misc-utils]. If you are uncertain of where exactly the too big record is coming from after looking at the executor logs, you can try and seperate the stage which is failing into distinct parts of the code by using persist at the DISK_ONLY level to introduce cuts into the graph. If your exception is happening with a Python UDF, it's possible that the individual records themselves might not be too large, but the batch-size used by Spark is set too high for the size of your records. You can try turning down the record size.","title":"Large record problems can show up in a few different ways."},{"location":"details/failure-executor-large-record/#large-record-problems-can-show-up-in-a-few-different-ways","text":"For particularly large records you may find an executor out of memory exception, otherwise you may find slow stages. You can get a Kyro serialization (for SQL) or Java serialization error (for RDD). 
In addition if a given column in a row is too large you may encounter a IllegalArgumentException: Cannot grow BufferHolder by size, because the size after growing exceeds size limitation 2147483632 . Some common causes of too big records are groupByKey in RDD land, UDAFs or list aggregations (like collect_list ) in Spark SQL, highly compressed or Sparse records without a sparse seriaization. For sparse records check out AltEncoder in (spark-misc-utils)[https://github.com/holdenk/spark-misc-utils]. If you are uncertain of where exactly the too big record is coming from after looking at the executor logs, you can try and seperate the stage which is failing into distinct parts of the code by using persist at the DISK_ONLY level to introduce cuts into the graph. If your exception is happening with a Python UDF, it's possible that the individual records themselves might not be too large, but the batch-size used by Spark is set too high for the size of your records. You can try turning down the record size.","title":"Large record problems can show up in a few different ways."},{"location":"details/forced-computations/","text":"Force computations There are multiple use cases where you might want to measure performance for different transformations in your spark job, in which case you have to materialize the transformations by calling an explicit action. If you encounter an exception during the write phase that appears unrelated, one technique is to force computation earlier of the DataFrame or RDD to narrow down the true cause of the exception. Forcing computation on RDDs is relatively simple, all you need to do is call count() and Spark will evaluate the RDD. Forcing computation on DataFrames is more complex. Calling an action like count() on a DataFrame might not necessarily work because the optimizer will likely ignore unnecessary transformations. In order to compute the row count, Spark does not have to execute all transformations. 
The Spark optimizer can simplify the query plan in such a way that the actual transformation that you need to measure will be skipped because it is simply not needed for finding out the final count. In order to make sure all the transformations are called, we need to force Spark to compute them using other ways. Here are some options to force Spark to compute all transformations of a DataFrame: df.rdd.count() : convert to an RDD and perform a count df.foreach (_ => ()) : do-nothing foreach Write to an output table (not recommended for performance benchmarking since the execution time will be impacted heavily by the actual writing process) If using Spark 3.0 and above, benchmarking is simplified by supporting a \"noop\" write format which will force compute all transformations without having to write it. df.write .mode(\"overwrite\") .format(\"noop\") .save()","title":"Force computations"},{"location":"details/forced-computations/#force-computations","text":"There are multiple use cases where you might want to measure performance for different transformations in your spark job, in which case you have to materialize the transformations by calling an explicit action. If you encounter an exception during the write phase that appears unrelated, one technique is to force computation earlier of the DataFrame or RDD to narrow down the true cause of the exception. Forcing computation on RDDs is relatively simple, all you need to do is call count() and Spark will evaluate the RDD. Forcing computation on DataFrames is more complex. Calling an action like count() on a DataFrame might not necessarily work because the optimizer will likely ignore unnecessary transformations. In order to compute the row count, Spark does not have to execute all transformations. The Spark optimizer can simplify the query plan in such a way that the actual transformation that you need to measure will be skipped because it is simply not needed for finding out the final count. 
In order to make sure all the transformations are called, we need to force Spark to compute them using other ways. Here are some options to force Spark to compute all transformations of a DataFrame: df.rdd.count() : convert to an RDD and perform a count df.foreach (_ => ()) : do-nothing foreach Write to an output table (not recommended for performance benchmarking since the execution time will be impacted heavily by the actual writing process) If using Spark 3.0 and above, benchmarking is simplified by supporting a \"noop\" write format which will force compute all transformations without having to write it. df.write .mode(\"overwrite\") .format(\"noop\") .save()","title":"Force computations"},{"location":"details/key-skew/","text":"Key/Partition Skew Key or partition skew is a frequent problem in Spark. Key skew can result in everything from slowly running jobs (with stragglers), to failing jobs. What is data skew? Usually caused during a transformation when the data in one partition ends up being a lot more than the others, bumping up memory could resolve an OOM error but does not solve the underlying problem Processing partitions are unbalanced by a magnitude then the largest partition becomes the bottleneck How to identify skew If one task took much longer to complete than the other tasks, it's usually a sign of Skew. On the Spark UI under Summary Metrics for completed tasks if the Max duration is higher by a significant magnitude from the Median it usually represents Skew, e.g.: Things to consider Mitigating skew has a cost (e.g. repartition) hence its ignorable unless the duration or input size is significantly higher in magnitude severely impacting job time Mitigation strategies Increasing executor memory to prevent OOM exceptions -> This a short-term solution if you want to unblock yourself but does not address the underlying issue. Sometimes this is not an option when you are already running at the max memory settings allowable. 
Salting is a way to balance partitions by introducing a salt/dummy key for the skewed partitions. Here is a sample workbook and an example of salting in content performance show completion pipeline, where the whole salting operation is parametrized with a JOIN_BUCKETS variable which helps with maintenance of this job. Isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results Adaptive Query Execution is a new framework with Spark 3.0, it enables Spark to dynamically identify skew. Under the hood adaptive query execution splits (and replicates if needed) skewed (large) partitions. If you are unable to upgrade to Spark 3.0, you can build the solution into the code by using the Salting/Partitioning technique listed above. Using approximate functions/ probabilistic data structure Using approximate distinct counts (Hyperloglog) can help get around skew if absolute precision isn't important. Approximate data structures like Tdigest can help with quantile computations. If you need exact quantiles, check out the example in High Performance Spark Certain types of aggregations and windows can result in partitioning the data on a particular key.","title":"Key/Partition Skew"},{"location":"details/key-skew/#keypartition-skew","text":"Key or partition skew is a frequent problem in Spark. 
Key skew can result in everything from slowly running jobs (with stragglers), to failing jobs.","title":"Key/Partition Skew"},{"location":"details/key-skew/#what-is-data-skew","text":"Usually caused during a transformation when the data in one partition ends up being a lot more than the others, bumping up memory could resolve an OOM error but does not solve the underlying problem Processing partitions are unbalanced by a magnitude then the largest partition becomes the bottleneck","title":"What is data skew?"},{"location":"details/key-skew/#how-to-identify-skew","text":"If one task took much longer to complete than the other tasks, it's usually a sign of Skew. On the Spark UI under Summary Metrics for completed tasks if the Max duration is higher by a significant magnitude from the Median it usually represents Skew, e.g.: Things to consider Mitigating skew has a cost (e.g. repartition) hence its ignorable unless the duration or input size is significantly higher in magnitude severely impacting job time","title":"How to identify skew"},{"location":"details/key-skew/#mitigation-strategies","text":"Increasing executor memory to prevent OOM exceptions -> This a short-term solution if you want to unblock yourself but does not address the underlying issue. Sometimes this is not an option when you are already running at the max memory settings allowable. Salting is a way to balance partitions by introducing a salt/dummy key for the skewed partitions. Here is a sample workbook and an example of salting in content performance show completion pipeline, where the whole salting operation is parametrized with a JOIN_BUCKETS variable which helps with maintenance of this job. Isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results Adaptive Query Execution is a new framework with Spark 3.0, it enables Spark to dynamically identify skew. 
Under the hood adaptive query execution splits (and replicates if needed) skewed (large) partitions. If you are unable to upgrade to Spark 3.0, you can build the solution into the code by using the Salting/Partitioning technique listed above. Using approximate functions/ probabilistic data structure Using approximate distinct counts (Hyperloglog) can help get around skew if absolute precision isn't important. Approximate data structures like Tdigest can help with quantile computations. If you need exact quantiles, check out the example in High Performance Spark Certain types of aggregations and windows can result in partitioning the data on a particular key.","title":"Mitigation strategies"},{"location":"details/notenoughexecs/","text":"Not enough execs","title":"Notenoughexecs"},{"location":"details/notenoughexecs/#not-enough-execs","text":"","title":"Not enough execs"},{"location":"details/partial_aggregates/","text":"Partial v.s. Full Aggregates Partial Aggregation is a key concept when handling large amounts of data in Spark. Full aggregation means that all of the data for one key must be together on the same node and then it can be aggregated, whereas partial aggregation allows Spark to start the aggregation \"map-side\" (e.g. before the shuffle) and then combine these \"partial\" aggregations together. In RDD world the classic \"full\" aggregation is groupByKey and partial aggregation is reduceByKey . In DataFrame/Datasets, Scala UDAFs implement partial aggregation but the basic PySpark Panda's/Arrow UDAFs do not support partial aggregation.","title":"Partial v.s. Full Aggregates"},{"location":"details/partial_aggregates/#partial-vs-full-aggregates","text":"Partial Aggregation is a key concept when handling large amounts of data in Spark. Full aggregation means that all of the data for one key must be together on the same node and then it can be aggregated, whereas partial aggregation allows Spark to start the aggregation \"map-side\" (e.g. 
before the shuffle) and then combine these \"partial\" aggregations together. In RDD world the classic \"full\" aggregation is groupByKey and partial aggregation is reduceByKey . In DataFrame/Datasets, Scala UDAFs implement partial aggregation but the basic PySpark Panda's/Arrow UDAFs do not support partial aggregation.","title":"Partial v.s. Full Aggregates"},{"location":"details/pyudfoom/","text":"PySpark UDF / UDAF OOM Out of memory exceptions with Python user-defined-functions are especially likely as Spark doesn't do a good job of managing memory between the JVM and Python VM. Together this can result in exceeding container memory limits . Grouped Map / Co-Grouped The Grouped & Co-Grouped UDFs are especially likely to cause out-of-memory exceptions in PySpark when combined with key skew . Unlike most built in Spark aggregations, PySpark user-defined-aggregates do not support partial aggregation. This means that all of the data for a single key must fit in memory. If possible try and use an equivalent built-in aggregation, write a Scala aggregation supporting partial aggregates, or switch to an RDD and use reduceByKey . This limitation applies regardless of whether you are using Arrow or \"vanilla\" UDAFs. Arrow / Pandas / Vectorized UDFS If you are using PySpark's not-so-new Arrow based UDFS (sometimes called pandas UDFS or vectorized UDFs ), record batching can cause issues. You can configure spark.sql.execution.arrow.maxRecordsPerBatch , which defaults to 10k records per batch. If your records are large this default may very well be the source of your out of memory exceptions. Note: setting spark.sql.execution.arrow.maxRecordsPerBatch too-small will result in reduced performance and reduced ability to vectorize operations over the data frames. mapInPandas / mapInArrow If you use mapInPandas or mapInArrow (proposed in 3.3+) it's important to note that Spark will serialize entire records, not just the columns needed by your UDF. 
If you encounter OOMs here because of record sizes, one option is to minimize the amount of data being serialized in each record. Select only the minimal data needed to perform the UDF + a key to rejoin with the target dataset.","title":"PySpark UDF / UDAF OOM"},{"location":"details/pyudfoom/#pyspark-udf-udaf-oom","text":"Out of memory exceptions with Python user-defined-functions are especially likely as Spark doesn't do a good job of managing memory between the JVM and Python VM. Together this can result in exceeding container memory limits .","title":"PySpark UDF / UDAF OOM"},{"location":"details/pyudfoom/#grouped-map-co-grouped","text":"The Grouped & Co-Grouped UDFs are especially likely to cause out-of-memory exceptions in PySpark when combined with key skew . Unlike most built in Spark aggregations, PySpark user-defined-aggregates do not support partial aggregation. This means that all of the data for a single key must fit in memory. If possible try and use an equivalent built-in aggregation, write a Scala aggregation supporting partial aggregates, or switch to an RDD and use reduceByKey . This limitation applies regardless of whether you are using Arrow or \"vanilla\" UDAFs.","title":"Grouped Map / Co-Grouped"},{"location":"details/pyudfoom/#arrow-pandas-vectorized-udfs","text":"If you are using PySpark's not-so-new Arrow based UDFS (sometimes called pandas UDFS or vectorized UDFs ), record batching can cause issues. You can configure spark.sql.execution.arrow.maxRecordsPerBatch , which defaults to 10k records per batch. If your records are large this default may very well be the source of your out of memory exceptions. 
Note: setting spark.sql.execution.arrow.maxRecordsPerBatch too-small will result in reduced performance and reduced ability to vectorize operations over the data frames.","title":"Arrow / Pandas / Vectorized UDFS"},{"location":"details/pyudfoom/#mapinpandas-mapinarrow","text":"If you use mapInPandas or mapInArrow (proposed in 3.3+) it's important to note that Spark will serialize entire records, not just the columns needed by your UDF. If you encounter OOMs here because of record sizes, one option is to minimize the amount of data being serialized in each record. Select only the minimal data needed to perform the UDF + a key to rejoin with the target dataset.","title":"mapInPandas / mapInArrow"},{"location":"details/read-partition-issue/","text":"Partition at read time We're used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. Note that the number of partitions can be optionally specified when using the read API. How to decide on a partition column or partition key? Does the key have relatively low cardinality? 1k distinct values are better than 1M distinct values. Consider a numeric, date, or timestamp column. Does the key have enough data in each partition? 1Gb is a good goal. Does the key have too much data in each partition? The data must fit on a single task in memory and avoid spilling to disk. Does the key have evenly distributed data in each partition? If some partitions have orders of magnitude more data than others, those larger partitions have the potential to spill to disk, OOM, or simply consume excess resources in comparison to the partitions with median amounts of data. You don't want to size executors for the bloated partition. If none of the columns or keys has a particularly even distribution, then create a new column at the expense of saving a new version of the table/RDD/DF. 
A frequent approach here is to create a new column using a hash based on existing columns. Does the key allow for fewer wide transformations? Wide transformations are more costly than narrow transformations. Does the number of partitions approximate 2-3x the number of allocated cores on the executors? Reference links Learning Spark High Performance Spark","title":"Partition at read time"},{"location":"details/read-partition-issue/#partition-at-read-time","text":"We're used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. Note that the number of partitions can be optionally specified when using the read API. How to decide on a partition column or partition key? Does the key have relatively low cardinality? 1k distinct values are better than 1M distinct values. Consider a numeric, date, or timestamp column. Does the key have enough data in each partition? 1Gb is a good goal. Does the key have too much data in each partition? The data must fit on a single task in memory and avoid spilling to disk. Does the key have evenly distributed data in each partition? If some partitions have orders of magnitude more data than others, those larger partitions have the potential to spill to disk, OOM, or simply consume excess resources in comparison to the partitions with median amounts of data. You don't want to size executors for the bloated partition. If none of the columns or keys has a particularly even distribution, then create a new column at the expense of saving a new version of the table/RDD/DF. A frequent approach here is to create a new column using a hash based on existing columns. Does the key allow for fewer wide transformations? Wide transformations are more costly than narrow transformations. 
Does the number of partitions approximate 2-3x the number of allocated cores on the executors?","title":"Partition at read time"},{"location":"details/read-partition-issue/#reference-links","text":"Learning Spark High Performance Spark","title":"Reference links"},{"location":"details/revise-bad_partitioning/","text":"Bad Partitioning There are three main different types and causes of bad partitioning in Spark. Partitioning is often the limitation of parallelism for most Spark jobs. The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. With key-skew the problem is not the number of partitions, but that the data is not evenly distributed amongst the partitions. The most frequent cause of skewed partitioning is that of \"key-skew.\" This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null ). The other type of skewed partitioning comes from \"input partitioned\" data which is not evenly partitioned. With input partitioned data, the RDD or Dataframe doesn't have a particular partitioner; it just matches however the data is stored on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. This input partitioned data can also be skewed due to key-skew if the data is written out partitioned on a skewed key. Insufficient partitioning is similar to input skewed partitioning, except instead of skew there just are not enough partitions. Similarly you can increase the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions ).","title":"Bad Partitioning"},{"location":"details/revise-bad_partitioning/#bad-partitioning","text":"There are three main different types and causes of bad partitioning in Spark. Partitioning is often the limitation of parallelism for most Spark jobs. The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. 
With key-skew the problem is not the number of partitions, but that the data is not evenly distributed amongst the partitions. The most frequent cause of skewed partitioning is that of \"key-skew.\" This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null ). The other type of skewed partitioning comes from \"input partitioned\" data which is not evenly partitioned. With input partitioned data, the RDD or Dataframe doesn't have a particular partitioner; it just matches however the data is stored on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. This input partitioned data can also be skewed due to key-skew if the data is written out partitioned on a skewed key. Insufficient partitioning is similar to input skewed partitioning, except instead of skew there just are not enough partitions. Similarly you can increase the number of partitions (e.g. repartition(5000) or change spark.sql.shuffle.partitions ).","title":"Bad Partitioning"},{"location":"details/revise-even_partitioning_still_slow/","text":"Even Partitioning Yet Still Slow To see if a stage is evenly partitioned take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. Sometimes a stage with even partitioning is still slow. If the max task duration is still substantially shorter than the stage's overall duration, this is often a sign of an insufficient number of executors. Spark can run (at most) spark.executor.cores * spark.dynamicAllocation.maxExecutors tasks in parallel (and in practice this will be lower since some tasks will be speculatively executed and some executors will fail). Try increasing the maxExecutors and seeing if your job speeds up. Note Setting spark.executor.cores * spark.dynamicAllocation.maxExecutors in excess of cluster capacity can result in the job waiting in PENDING state. 
So, try increasing maxExecutors within the limitations of the cluster resources and check if the job runtime is faster given the same input data. If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. See Bad Partitioning .","title":"Even Partitioning Yet Still Slow"},{"location":"details/revise-even_partitioning_still_slow/#even-partitioning-yet-still-slow","text":"To see if a stage if evenly partioned take a look at the Spark WebUI --> Stage tab and look at the distribution of data sizes and durations of the completed tasks. Sometimes a stage with even parititoning is still slow. If the max task duration is still substantailly shorter than the stages overall duration, this is often a sign of an insufficient number of executors. Spark can run (at most) spark.executor.cores * spark.dynamicAllocation.maxExecutors tasks in parallel (and in practice this will be lower since some tasks will be speculatively executed and some executors will fail). Try increasing the maxExecutors and seeing if your job speeds up. Note Setting spark.executor.cores * spark.dynamicAllocation.maxExecutors in excess of cluster capacity can result in the job waiting in PENDING state. So, try increasing maxExecutors within the limitations of the cluster resources and check if the job runtime is faster given the same input data. If the data is evenly partitioned but the max task duration is longer than desired for the stage, increasing the number of executors will not help and you'll need to re-partition the data. 
See Bad Partitioning .","title":"Even Partitioning Yet Still Slow"},{"location":"details/slow-executor/","text":"Slow executor There can be many reasons executors are slow; here are a few things you can look into: Performance distribution among tasks in the same stage: In Spark UI - Stages - Summary Metric: check if there's uneven distribution of duration / input size. If true, there may be data skews or uneven partition splits. See uneven partitioning . Task size: In Spark UI - Stages - Summary Metrics, check the input/output size of tasks. If individual input or output tasks are larger than a few hundred megabytes, you may need more partitions. Try increasing spark.sql.shuffle.partitions or spark.sql.files.maxPartitionBytes or consider making a repartition call. GC: Check if GC time is a small fraction of duration, if it's more than a few percents, try increasing executor memory and see if any difference. If adding memory is not helping, you can now see if any optimization can be done in your code for that stage.","title":"Slow executor"},{"location":"details/slow-executor/#slow-executor","text":"There can be many reasons executors are slow; here are a few things you can look into: Performance distribution among tasks in the same stage: In Spark UI - Stages - Summary Metric: check if there's uneven distribution of duration / input size. If true, there may be data skews or uneven partition splits. See uneven partitioning . Task size: In Spark UI - Stages - Summary Metrics, check the input/output size of tasks. If individual input or output tasks are larger than a few hundred megabytes, you may need more partitions. Try increasing spark.sql.shuffle.partitions or spark.sql.files.maxPartitionBytes or consider making a repartition call. GC: Check if GC time is a small fraction of duration, if it's more than a few percents, try increasing executor memory and see if any difference. 
If adding memory is not helping, you can now see if any optimization can be done in your code for that stage.","title":"Slow executor"},{"location":"details/slow-job-slow-cluster/","text":"Slow Cluster How do I know if and when my job is waiting for cluster resources?? Sometimes the cluster manager may choke or otherwise not be able to allocate resources and we don't have a good way of detecting this situation making it difficult for the user to debug and tell apart from Spark not scaling up correctly. As of Spark3.4, an executor will note when and for how long it waits for cluster resources. Check the JVM metrics for this information. Reference link: https://issues.apache.org/jira/browse/SPARK-36664","title":"Slow job slow cluster"},{"location":"details/slow-job-slow-cluster/#slow-cluster","text":"How do I know if and when my job is waiting for cluster resources?? Sometimes the cluster manager may choke or otherwise not be able to allocate resources and we don't have a good way of detecting this situation making it difficult for the user to debug and tell apart from Spark not scaling up correctly. As of Spark3.4, an executor will note when and for how long it waits for cluster resources. Check the JVM metrics for this information.","title":"Slow Cluster"},{"location":"details/slow-job-slow-cluster/#reference-link","text":"https://issues.apache.org/jira/browse/SPARK-36664","title":"Reference link:"},{"location":"details/slow-job/","text":"Slow job Spark job can be slow for various reasons but here is a couple of reasons Slow stage(s): Go to Slow Stage section to identify the slow stage. In most cases, a job is slow because one or more of the stages are slow. Too big DAG: Go to TooBigDAG section for more details on this topic","title":"Slow job"},{"location":"details/slow-job/#slow-job","text":"Spark job can be slow for various reasons but here is a couple of reasons Slow stage(s): Go to Slow Stage section to identify the slow stage. 
In most cases, a job is slow because one or more of the stages are slow. Too big DAG: Go to TooBigDAG section for more details on this topic","title":"Slow job"},{"location":"details/slow-map/","text":"Slow Map Below is a list of reasons why your map stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios. flowchart LR SlowMap[Slow Read / Map] SlowMap --> SLOWEXEC[Slow executor] SlowMap --> EVENPART_SLOW[Even partitioning] SlowMap --> SkewedMapTasks[Skewed Map Tasks and uneven partitioning] EVENPART_SLOW --> MissingSourcePredicates[Reading more data than needed] EVENPART_SLOW --> TooFewMapTasks[Not enough Read/Map Tasks] EVENPART_SLOW --> TooManyMapTasks[Too many Read/Map Tasks] EVENPART_SLOW --> SlowTransformations[Slow Transformations] EVENPART_SLOW --> UDFSLOWNESS[Slow UDF] SkewedMapTasks --> RecordSkew[Record Skew] SkewedMapTasks --> TaskSkew[Task skew] TaskSkew --> READPARTITIONISSUES[Read partition issues] MissingSourcePredicates --> FILTERNOTPUSHED[Filter not pushed] click EVENPART_SLOW \"../../details/even_partitioning_still_slow\" click SLOWEXEC \"../../details/slow-executor\" click SkewedMapTasks \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click RecordSkew \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click TaskSkew \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click MissingSourcePredicates \"../../details/slow-map/#reading-more-data-than-needed\" click UDFSLOWNESS \"../../details/udfslow\" click LARGERECORDS \"../../details/failure-executor-large-record\" click TooFewMapTasks \"../../details/slow-map/#not-enough-readmap-tasks\" click TooManyMapTasks \"../../details/slow-map/#too-many-readmap-tasks\" click SlowTransformations \"../../details/slow-map/#slow-transformations\" click FILTERNOTPUSHED \"../../details/slow-partition_filter_pushdown\" click SLOWEXEC \"../../details/slow-executor\" click READPARTITIONISSUES 
\"../../details/read-partition-issue\" Reading more data than needed Iceberg/Parquet provides 3 layers of data pruning/filtering, so it is recommended to make the most of it by utilizing them as upstream in your ETL as possible. Partition Pruning : Applying a filter on a partition column would mean the Spark can prune all the partitions that are not needed (ex: utc_date, utc_hour etc.). Refer to this section for some examples. Column Pruning : Parquet, a columnar format, allows us to read specific columns from a row group without having to read the entire row. By selecting the fields that you only need for your job/sql(instead of \"select *\"), you can avoid bringing unnecessary data only to drop it in the subsequent stages. Predicate Push Down: It is also recommended to use filters on non-partition columns as this would allow Spark to exclude specific row groups while reading data from S3. For ex: account_id is not null if you know that you would be dropping the NULL account_ids eventually. See also filter not pushed down , aggregation not pushed down(todo: add details), Bad storage partitioning(todo: add details). Not enough Read/Map Tasks If your map stage is taking longer, and you are sure that you are not reading more data than needed, then you may be reading the data with small no. of tasks. You can increase the no. of map tasks by decreasing target split size. Note that if you are constrained by the resources(map tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors Too many Read/Map Tasks If you have large no. of map tasks in your stage, you could run into driver memory related errors as the task metadata could overwhelm the driver. This also could put a stress on shuffle(on map side) as more map tasks would create more shuffle blocks. It is recommended to keep the task count for a stage under 80k. You can decrease the no. 
of map tasks by increasing target split size (todo: add detail) for an Iceberg table. (Note: For a non-iceberg table, the property is spark.sql.maxPartitionBytes and it is at the job level and not at the table level) Slow Transformations Another reason for slow running map tasks could be from many reason, some common ones include: Regex : You have RegEx in your transformation. Refer to RegEx tips for tuning. udf: Make sure you are sending only the data that you need in UDF and tune UDF for performance. Refer to Slow UDF for more details. Json: TBD All these transformations may run into skew issues if you have a single row/column that is bloated. You could prevent this by checking the payload size before calling the transformation as a single row/column could potentially slow down the entire stage. Skewed Map Tasks or Uneven partitioning The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. The data is not evenly distributed amongst the partitions. Uneven partitioning due to Key-skew : The most frequent cause of skewed partitioning is that of \"key-skew.\" This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null ). Uneven partitioning due to input layout: We are used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. In cases where the RDD or Dataframe doesn't have a particular partitioner, data is partitioned according to the storage on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. Spark is often able to avoid input layout issues by combinding and splitting inputs (when input formats are \"splittable\"), but not all input formats give Spark this freedom. 
One common example is gzip , although there is a work-around for \"splittable gzip\" but this comes at the cost of decompressing the entire file multiple times. Record Skew : A single bloated row/record could be the root cause for slow map task. The easiest way to identify this is by checking your string fields that has Json payload. ( Ex: A bug in a client could write a lot of data). You can identify the culprit by checking the max(size/length) of the field in your upstream table. For CL, snapshot is a candidate for bloated field. Task Skew : **This is only applicable to the tables with non-splittable file format(like TEXT, zip) and parquet files should never run into this issue. Task skew is where one of the tasks got more rows than others and it is possible if the upstream table has a single file that is large and has the non-splittable format.","title":"Slow Map"},{"location":"details/slow-map/#slow-map","text":"Below is a list of reasons why your map stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios. 
flowchart LR SlowMap[Slow Read / Map] SlowMap --> SLOWEXEC[Slow executor] SlowMap --> EVENPART_SLOW[Even partitioning] SlowMap --> SkewedMapTasks[Skewed Map Tasks and uneven partitioning] EVENPART_SLOW --> MissingSourcePredicates[Reading more data than needed] EVENPART_SLOW --> TooFewMapTasks[Not enough Read/Map Tasks] EVENPART_SLOW --> TooManyMapTasks[Too many Read/Map Tasks] EVENPART_SLOW --> SlowTransformations[Slow Transformations] EVENPART_SLOW --> UDFSLOWNESS[Slow UDF] SkewedMapTasks --> RecordSkew[Record Skew] SkewedMapTasks --> TaskSkew[Task skew] TaskSkew --> READPARTITIONISSUES[Read partition issues] MissingSourcePredicates --> FILTERNOTPUSHED[Filter not pushed] click EVENPART_SLOW \"../../details/even_partitioning_still_slow\" click SLOWEXEC \"../../details/slow-executor\" click SkewedMapTasks \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click RecordSkew \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click TaskSkew \"../../details/slow-map/#skewed-map-tasks-or-uneven-partitioning\" click MissingSourcePredicates \"../../details/slow-map/#reading-more-data-than-needed\" click UDFSLOWNESS \"../../details/udfslow\" click LARGERECORDS \"../../details/failure-executor-large-record\" click TooFewMapTasks \"../../details/slow-map/#not-enough-readmap-tasks\" click TooManyMapTasks \"../../details/slow-map/#too-many-readmap-tasks\" click SlowTransformations \"../../details/slow-map/#slow-transformations\" click FILTERNOTPUSHED \"../../details/slow-partition_filter_pushdown\" click SLOWEXEC \"../../details/slow-executor\" click READPARTITIONISSUES \"../../details/read-partition-issue\"","title":"Slow Map"},{"location":"details/slow-map/#reading-more-data-than-needed","text":"Iceberg/Parquet provides 3 layers of data pruning/filtering, so it is recommended to make the most of it by utilizing them as upstream in your ETL as possible. 
Partition Pruning : Applying a filter on a partition column would mean the Spark can prune all the partitions that are not needed (ex: utc_date, utc_hour etc.). Refer to this section for some examples. Column Pruning : Parquet, a columnar format, allows us to read specific columns from a row group without having to read the entire row. By selecting the fields that you only need for your job/sql(instead of \"select *\"), you can avoid bringing unnecessary data only to drop it in the subsequent stages. Predicate Push Down: It is also recommended to use filters on non-partition columns as this would allow Spark to exclude specific row groups while reading data from S3. For ex: account_id is not null if you know that you would be dropping the NULL account_ids eventually. See also filter not pushed down , aggregation not pushed down(todo: add details), Bad storage partitioning(todo: add details).","title":"Reading more data than needed"},{"location":"details/slow-map/#not-enough-readmap-tasks","text":"If your map stage is taking longer, and you are sure that you are not reading more data than needed, then you may be reading the data with small no. of tasks. You can increase the no. of map tasks by decreasing target split size. Note that if you are constrained by the resources(map tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors","title":"Not enough Read/Map Tasks"},{"location":"details/slow-map/#too-many-readmap-tasks","text":"If you have large no. of map tasks in your stage, you could run into driver memory related errors as the task metadata could overwhelm the driver. This also could put a stress on shuffle(on map side) as more map tasks would create more shuffle blocks. It is recommended to keep the task count for a stage under 80k. You can decrease the no. of map tasks by increasing target split size (todo: add detail) for an Iceberg table. 
(Note: For a non-iceberg table, the property is spark.sql.maxPartitionBytes and it is at the job level and not at the table level)","title":"Too many Read/Map Tasks"},{"location":"details/slow-map/#slow-transformations","text":"Another reason for slow running map tasks could be from many reason, some common ones include: Regex : You have RegEx in your transformation. Refer to RegEx tips for tuning. udf: Make sure you are sending only the data that you need in UDF and tune UDF for performance. Refer to Slow UDF for more details. Json: TBD All these transformations may run into skew issues if you have a single row/column that is bloated. You could prevent this by checking the payload size before calling the transformation as a single row/column could potentially slow down the entire stage.","title":"Slow Transformations"},{"location":"details/slow-map/#skewed-map-tasks-or-uneven-partitioning","text":"The most common (and most difficult to fix) bad partitioning in Spark is that of skewed partitioning. The data is not evenly distributed amongst the partitions. Uneven partitioning due to Key-skew : The most frequent cause of skewed partitioning is that of \"key-skew.\" This happens frequently since humans and machines both tend to cluster resulting in skew (e.g. NYC and null ). Uneven partitioning due to input layout: We are used to thinking of partitioning after a shuffle, but partitioning problems can occur at read time as well. This often happens when the layout of the data on disk is not well suited to our computation. In cases where the RDD or Dataframe doesn't have a particular partitioner, data is partitioned according to the storage on disk. Uneven input partitioned data can be fixed with an explicit repartition/shuffle. Spark is often able to avoid input layout issues by combinding and splitting inputs (when input formats are \"splittable\"), but not all input formats give Spark this freedom. 
One common example is gzip , although there is a work-around for \"splittable gzip\" but this comes at the cost of decompressing the entire file multiple times. Record Skew : A single bloated row/record could be the root cause for slow map task. The easiest way to identify this is by checking your string fields that has Json payload. ( Ex: A bug in a client could write a lot of data). You can identify the culprit by checking the max(size/length) of the field in your upstream table. For CL, snapshot is a candidate for bloated field. Task Skew : **This is only applicable to the tables with non-splittable file format(like TEXT, zip) and parquet files should never run into this issue. Task skew is where one of the tasks got more rows than others and it is possible if the upstream table has a single file that is large and has the non-splittable format.","title":"Skewed Map Tasks or Uneven partitioning"},{"location":"details/slow-partition_filter_pushdown/","text":"Partition Filters Processing more data than necessary will typically slow down the job. If the input table is partitioned then applying filters on the partition columns can restrict the input volume Spark needs to scan. A simple equality filter gets pushed down to the batch scan and enables Spark to only scan the files where dateint = 20211101 of a sample table partitioned on dateint and hour . select * from jlantos.sample_table where dateint = 20211101 limit 100 Examples when the filter does not get pushed down The filter contains an expression If instead of a particular date we'd like to load data from the 1st of any month we might rewrite the above query such as: select * from jlantos.sample_table where dateint % 100 = 1 limit 100 The query plan shows that Spark in this case scans the whole table and filters only in a later step. Filter is dynamic via a join In a more complex job we might restrict the data based on joining to another table. 
If the filtering criteria is not static it won't be pushed down to the scan. So in the example below the two table scans happen independently, and min(dateint) calculated in the CTE won't have an effect on the second scan. with dates as (select min(dateint) dateint from jlantos.sample_table) select * from jlantos.sample_table st join dates d on st.dateint = d.dateint","title":"Partition Filters"},{"location":"details/slow-partition_filter_pushdown/#partition-filters","text":"Processing more data than necessary will typically slow down the job. If the input table is partitioned then applying filters on the partition columns can restrict the input volume Spark needs to scan. A simple equality filter gets pushed down to the batch scan and enables Spark to only scan the files where dateint = 20211101 of a sample table partitioned on dateint and hour . select * from jlantos.sample_table where dateint = 20211101 limit 100","title":"Partition Filters"},{"location":"details/slow-partition_filter_pushdown/#examples-when-the-filter-does-not-get-pushed-down","text":"","title":"Examples when the filter does not get pushed down"},{"location":"details/slow-partition_filter_pushdown/#the-filter-contains-an-expression","text":"If instead of a particular date we'd like to load data from the 1st of any month we might rewrite the above query such as: select * from jlantos.sample_table where dateint % 100 = 1 limit 100 The query plan shows that Spark in this case scans the whole table and filters only in a later step.","title":"The filter contains an expression"},{"location":"details/slow-partition_filter_pushdown/#filter-is-dynamic-via-a-join","text":"In a more complex job we might restrict the data based on joining to another table. If the filtering criteria is not static it won't be pushed down to the scan. So in the example below the two table scans happen independently, and min(dateint) calculated in the CTE won't have an effect on the second scan. 
with dates as (select min(dateint) dateint from jlantos.sample_table) select * from jlantos.sample_table st join dates d on st.dateint = d.dateint","title":"Filter is dynamic via a join"},{"location":"details/slow-reduce/","text":"Slow Reduce Below is a list of reasons why your map stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios. Not Enough Shuffle Tasks Too many shuffle tasks Skewed Shuffle Tasks Spill To Disk Not Enough Shuffle Tasks The default shuffle parallelism for our Spark cluster is 500, and it may not be enough for larger datasets. If you don't see skew and most/all of the tasks are taking really long to finish a reduce stage, you can improve the overall runtime by increasing the spark.sql.shuffle.partitions . Note that if you are constrained by the resources(reduce tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors Too many shuffle tasks While having too many shuffle tasks has no direct effect on the stage duration, it could slow the stage down if there are multiple retries during the shuffle stage due to shuffle fetch failures. Note that the higher the shuffle partitions, the more chances of running into FetchFailure exceptions. Skewed Shuffle Tasks Partitioning problems are often the limitation of parallelism for most Spark jobs. There are two primary types of bad partitioning, skewed partitioning (where the partitions are not equal in size/work) or even but non-ideal number partitioning (where the partitions are equal in size/work). If your tasks are taking roughly equivalent times to complete then you likely have even partitioning, and if they are taking unequal times to complete then you may have skewed or uneven partitioning. What is skew and how to identify skew . 
Skew is typically from one of the below stages: Join: Skew is natural in most of our data sets due to the nature of the data. Both Hash join and Sort-Merge join can run into skew issue if you have a lot of data for one or more keys on either side of the join. Check Skewed Joins for handling skewed joins with example. Aggregation/Group By: All aggregate functions(UDAFs) using SQL/dataframes/Datasets implement partial aggregation(combiner in MR) so you would only run into a skew if you are using a non-algebraic functions like distinct and percentiles which can't be computed partially. Partial vs Full aggregates Sort/Repartition/Coalesce before write: It is recommended to introduce an additional stage for Sort or Repartition or Coalesce before the write stage to write optimal no. of S3 files into your target table. Check Skewed Write for more details. Slow Aggregation Below non-algebraic functions can slow down the reduce stage if you have too many values/rows for a given key. Count Distinct: Use HyperLogLog(HLL) based sketches for cardinality if you just need the approx counts for trends and don't need the exact counts. HLL can estimate with a standard error of 2%. Percentiles: Use approx_percentile or t-digest sketches which would speed up the computation for a small accuracy trade-off. Spill To Disk Spark executors will start using \"disk\" once they exceed the spark memory fraction of executor memory. This it self is not an issue but too much of \"spill to disk\" will slow down the stage/job. You can overcome this by either increasing the executor memory or tweaking the job/stage to consume less memory.(for ex: a Sort-Merge join requires a lot less memory than a Hash join)","title":"Slow reduce"},{"location":"details/slow-reduce/#slow-reduce","text":"Below is a list of reasons why your map stage might be slow. Note that this is not an exhaustive list but covers most of the scenarios. 
Not Enough Shuffle Tasks Too many shuffle tasks Skewed Shuffle Tasks Spill To Disk","title":"Slow Reduce"},{"location":"details/slow-reduce/#not-enough-shuffle-tasks","text":"The default shuffle parallelism for our Spark cluster is 500, and it may not be enough for larger datasets. If you don't see skew and most/all of the tasks are taking really long to finish a reduce stage, you can improve the overall runtime by increasing the spark.sql.shuffle.partitions . Note that if you are constrained by the resources(reduce tasks are just waiting for resources and not in RUNNING status), you would have to request more executors for your job by increasing spark.dynamicAllocation.maxExecutors","title":"Not Enough Shuffle Tasks"},{"location":"details/slow-reduce/#too-many-shuffle-tasks","text":"While having too many shuffle tasks has no direct effect on the stage duration, it could slow the stage down if there are multiple retries during the shuffle stage due to shuffle fetch failures. Note that the higher the shuffle partitions, the more chances of running into FetchFailure exceptions.","title":"Too many shuffle tasks"},{"location":"details/slow-reduce/#skewed-shuffle-tasks","text":"Partitioning problems are often the limitation of parallelism for most Spark jobs. There are two primary types of bad partitioning, skewed partitioning (where the partitions are not equal in size/work) or even but non-ideal number partitioning (where the partitions are equal in size/work). If your tasks are taking roughly equivalent times to complete then you likely have even partitioning, and if they are taking unequal times to complete then you may have skewed or uneven partitioning. What is skew and how to identify skew . Skew is typically from one of the below stages: Join: Skew is natural in most of our data sets due to the nature of the data. Both Hash join and Sort-Merge join can run into skew issue if you have a lot of data for one or more keys on either side of the join. 
Check Skewed Joins for handling skewed joins with example. Aggregation/Group By: All aggregate functions(UDAFs) using SQL/dataframes/Datasets implement partial aggregation(combiner in MR) so you would only run into a skew if you are using a non-algebraic functions like distinct and percentiles which can't be computed partially. Partial vs Full aggregates Sort/Repartition/Coalesce before write: It is recommended to introduce an additional stage for Sort or Repartition or Coalesce before the write stage to write optimal no. of S3 files into your target table. Check Skewed Write for more details.","title":"Skewed Shuffle Tasks"},{"location":"details/slow-reduce/#slow-aggregation","text":"Below non-algebraic functions can slow down the reduce stage if you have too many values/rows for a given key. Count Distinct: Use HyperLogLog(HLL) based sketches for cardinality if you just need the approx counts for trends and don't need the exact counts. HLL can estimate with a standard error of 2%. Percentiles: Use approx_percentile or t-digest sketches which would speed up the computation for a small accuracy trade-off.","title":"Slow Aggregation"},{"location":"details/slow-reduce/#spill-to-disk","text":"Spark executors will start using \"disk\" once they exceed the spark memory fraction of executor memory. This it self is not an issue but too much of \"spill to disk\" will slow down the stage/job. You can overcome this by either increasing the executor memory or tweaking the job/stage to consume less memory.(for ex: a Sort-Merge join requires a lot less memory than a Hash join)","title":"Spill To Disk"},{"location":"details/slow-regex-tips/","text":"Regular Expression Tips Spark function regexp_extract and regexp_replace can transform data using regular expressions. The regular expression pattern follows Java regex pattern . 
Task Running Very Slowly Stack trace shows: java.lang.Character.codePointAt(Character.java:4884) java.util.regex.Pattern$CharProperty.match(Pattern.java:3789) java.util.regex.Pattern$Curly.match1(Pattern.java:4307) java.util.regex.Pattern$Curly.match(Pattern.java:4250) java.util.regex.Pattern$GroupHead.match(Pattern.java:4672) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$Start.match(Pattern.java:3475) java.util.regex.Matcher.search(Matcher.java:1248) java.util.regex.Matcher.find(Matcher.java:637) org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificUnsafeProjection.RegExpExtract_2$(Unknown Source) Certain values in the dataset cause regexp_extract with a certain regex pattern to run very slowly. See https://stackoverflow.com/questions/5011672/java-regular-expression-running-very-slow. Match Special Character in PySpark You will need 4 backslashes to match any special character, 2 required by Python string escaping and 2 by Java regex parsing. df = spark.sql(\"SELECT regexp_replace('{{template}}', '\\\\\\\\{\\\\\\\\{', '#')\")","title":"Regular Expression Tips"},{"location":"details/slow-regex-tips/#regular-expression-tips","text":"Spark function regexp_extract and regexp_replace can transform data using regular expressions. 
The regular expression pattern follows Java regex pattern .","title":"Regular Expression Tips"},{"location":"details/slow-regex-tips/#task-running-very-slowly","text":"Stack trace shows: java.lang.Character.codePointAt(Character.java:4884) java.util.regex.Pattern$CharProperty.match(Pattern.java:3789) java.util.regex.Pattern$Curly.match1(Pattern.java:4307) java.util.regex.Pattern$Curly.match(Pattern.java:4250) java.util.regex.Pattern$GroupHead.match(Pattern.java:4672) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$BmpCharProperty.match(Pattern.java:3812) java.util.regex.Pattern$Curly.match0(Pattern.java:4286) java.util.regex.Pattern$Curly.match(Pattern.java:4248) java.util.regex.Pattern$Start.match(Pattern.java:3475) java.util.regex.Matcher.search(Matcher.java:1248) java.util.regex.Matcher.find(Matcher.java:637) org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificUnsafeProjection.RegExpExtract_2$(Unknown Source) Certain values in the dataset cause regexp_extract with a certain regex pattern to run very slowly. See https://stackoverflow.com/questions/5011672/java-regular-expression-running-very-slow.","title":"Task Running Very Slowly"},{"location":"details/slow-regex-tips/#match-special-character-in-pyspark","text":"You will need 4 backslashes to match any special character, 2 required by Python string escaping and 2 by Java regex parsing. 
df = spark.sql(\"SELECT regexp_replace('{{template}}', '\\\\\\\\{\\\\\\\\{', '#')\")","title":"Match Special Character in PySpark"},{"location":"details/slow-skewed-join/","text":"Skewed Joins Skewed joins happen frequently as some locations (NYC), data (null), and titles ( Mr. Farts - Farting Around The House ) are more popular than other types of data. To a certain degree Spark 3.3 query engine has improvements to handle skewed joins, so a first step should be attempting to upgrade to the most recent version of Spark. Broadcast joins are ideal for handling skewed joins, but they only work when one table is smaller than the other. A general, albeit hacky, solution is to isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results. Other techniques can include introducing some type of salting and doing multi-stage joins.","title":"Skewed Joins"},{"location":"details/slow-skewed-join/#skewed-joins","text":"Skewed joins happen frequently as some locations (NYC), data (null), and titles ( Mr. Farts - Farting Around The House ) are more popular than other types of data. To a certain degree Spark 3.3 query engine has improvements to handle skewed joins, so a first step should be attempting to upgrade to the most recent version of Spark. Broadcast joins are ideal for handling skewed joins, but they only work when one table is smaller than the other. A general, albeit hacky, solution is to isolate the data for the skewed key, broadcast it for processing (e.g. join) and then union back the results. Other techniques can include introducing some type of salting and doing multi-stage joins.","title":"Skewed Joins"},{"location":"details/slow-skewed-write/","text":"Skewed/Slow Write Writes can be slow depending on the preceding stage of write() , target table partition scheme, and write parallelism( spark.sql.shuffle.partitions ). 
The goal of this article is to go through below options and see the most optimal transformation for writing optimal files in target table/partition. When to use Sort A global sort in Spark internally uses range-partitioning to assign sort keys to a partition range. This involves in collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. Use global sort If you are writing multiple partitions(especially heterogeneous partitions) as part of your write() as it can estimate the no. of files/tasks for a given target table partition based on the no. of sample rows it observes. If you want to enable predicate-push-down on a set of target table fields for down stream consumption. Tips: 1. You can increase the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to improve the estimates if you are not seeing optimal no. of files per partition. 2. You can also introduce salt to sort keys to increase the no. of write tasks if the sort keys cardinality less than the spark.sql.shuffle.partitions . Example When to use Repartition Repartition(hash partitioning) partitions rows in a round-robin manner and to produce uniform distribution across the tasks and a hash partitioning just before the write would produce uniform files and all write tasks should take about the same time. Use repartition If you are writing into a single partition or a non-partitioned table and want to get uniform file sizes. If you want to produce a specific no. of files. for ex: using repartition(100) would generate up to 100 files. When to use Coalesce Coalesce tries to combine files without invoking a shuffle and useful when you are going from a higher parallelism to lower parallelism. Use Coalesce: If you are writing very small no. of files and the file size is relatively small. Note that, Coalesce(N) is not an optimal way to merge files as it tries to combine multiple files(until it reaches target no. 
of files 'N' ) without taking size into equation, and you could run into (org.apache.spark.memory.SparkOutOfMemoryError: Unable to acquire 65536 bytes of memory, got 0) if the size exceeds.","title":"Skewed/Slow Write"},{"location":"details/slow-skewed-write/#skewedslow-write","text":"Writes can be slow depending on the preceding stage of write() , target table partition scheme, and write parallelism( spark.sql.shuffle.partitions ). The goal of this article is to go through below options and see the most optimal transformation for writing optimal files in target table/partition.","title":"Skewed/Slow Write"},{"location":"details/slow-skewed-write/#when-to-use-sort","text":"A global sort in Spark internally uses range-partitioning to assign sort keys to a partition range. This involves in collecting sample rows(reservoir sampling) from input partitions and sending them to the driver for computing range boundaries. Use global sort If you are writing multiple partitions(especially heterogeneous partitions) as part of your write() as it can estimate the no. of files/tasks for a given target table partition based on the no. of sample rows it observes. If you want to enable predicate-push-down on a set of target table fields for down stream consumption. Tips: 1. You can increase the spark property spark.sql.execution.rangeExchange.sampleSizePerPartition to improve the estimates if you are not seeing optimal no. of files per partition. 2. You can also introduce salt to sort keys to increase the no. of write tasks if the sort keys cardinality less than the spark.sql.shuffle.partitions . Example","title":"When to use Sort"},{"location":"details/slow-skewed-write/#when-to-use-repartition","text":"Repartition(hash partitioning) partitions rows in a round-robin manner and to produce uniform distribution across the tasks and a hash partitioning just before the write would produce uniform files and all write tasks should take about the same time. 
Use repartition If you are writing into a single partition or a non-partitioned table and want to get uniform file sizes. If you want to produce a specific no. of files. for ex: using repartition(100) would generate up to 100 files.","title":"When to use Repartition"},{"location":"details/slow-skewed-write/#when-to-use-coalesce","text":"Coalesce tries to combine files without invoking a shuffle and useful when you are going from a higher parallelism to lower parallelism. Use Coalesce: If you are writing very small no. of files and the file size is relatively small. Note that, Coalesce(N) is not an optimal way to merge files as it tries to combine multiple files(until it reaches target no. of files 'N' ) without taking size into equation, and you could run into (org.apache.spark.memory.SparkOutOfMemoryError: Unable to acquire 65536 bytes of memory, got 0) if the size exceeds.","title":"When to use Coalesce"},{"location":"details/slow-stage/","text":"Identify the slow stage When you have an event log from an earlier \"good run\" You can compare the slow and the fast runs. 
For this you can even use your local pyspark and calculate a ratio between slow and fast run for each stage metrics: # Helper methods (just copy-paste it) def createEventView(eventLogFile, eventViewName): sql(\"CREATE OR REPLACE TEMPORARY VIEW {} USING org.apache.spark.sql.json OPTIONS (path '{}')\".format(eventViewName, eventLogFile)) def createStageMetricsView(eventViewName, stageMetricsViewName): sql(\"CREATE OR REPLACE TEMPORARY VIEW {} AS select `Submission Time`, `Completion Time`, `Stage ID`, t3.col.* from (select `Stage Info`.* from {} where Event='SparkListenerStageCompleted') lateral view explode(Accumulables) t3\".format(stageMetricsViewName, eventViewName)) def showDiffInStage(fastStagesTable, slowStagesTable, stageID): sql(\"select {fastStages}.Name, {fastStages}.Value as Fast, {slowStages}.Value as Slow, {slowStages}.Value / {fastStages}.Value as `Slow / Fast` from {fastStages} INNER JOIN {slowStages} ON {fastStages}.ID = {slowStages}.ID where {fastStages}.`Stage ID` = {stageID} and {slowStages}.`Stage ID` = {stageID}\".format(fastStages=fastStagesTable, slowStages=slowStagesTable, stageID=stageID)).show(40, False) # Creating the views from the event logs (just an example, you have to specify your own paths) createEventView(\"\", \"FAST_EVENTS\") createStageMetricsView(\"FAST_EVENTS\", \"FAST_STAGE_METRICS\") createEventView(\"\", \"SLOW_EVENTS\") createStageMetricsView(\"SLOW_EVENTS\", \"SLOW_STAGE_METRICS\") >>> sql(\"SELECT DISTINCT `Stage ID` from FAST_STAGE_METRICS\").show() +--------+ |Stage ID| +--------+ | 0| | 1| | 2| +--------+ >>> sql(\"SELECT DISTINCT `Stage ID` from SLOW_STAGE_METRICS\").show() +--------+ |Stage ID| +--------+ | 0| | 1| | 2| +--------+ >>> showDiffInStage(\"FAST_STAGE_METRICS\", \"SLOW_STAGE_METRICS\", 2) +-------------------------------------------+-------------+-------------+------------------+ |Name |Fast |Slow |Slow / Fast | +-------------------------------------------+-------------+-------------+------------------+ 
|scan time total (min, med, max) |1095931 |1628308 |1.485776020570638 | |internal.metrics.executorRunTime |7486648 |12990126 |1.735105750931525 | |duration total (min, med, max) |7017645 |12322243 |1.7558943206731032| |internal.metrics.jvmGCTime |220325 |1084412 |4.921874503574266 | |internal.metrics.output.bytesWritten |34767744411 |34767744411 |1.0 | |internal.metrics.input.recordsRead |149652381 |149652381 |1.0 | |internal.metrics.executorDeserializeCpuTime|5666230304 |7760682789 |1.3696377260771504| |internal.metrics.resultSize |625598 |626415 |1.0013059504665935| |internal.metrics.executorCpuTime |6403420405851|8762799691603|1.3684560963069305| |internal.metrics.input.bytesRead |69488204276 |69488204276 |1.0 | |number of output rows |149652381 |149652381 |1.0 | |internal.metrics.resultSerializationTime |36 |72 |2.0 | |internal.metrics.output.recordsWritten |149652381 |149652381 |1.0 | |internal.metrics.executorDeserializeTime |6024 |11954 |1.9843957503320053| +-------------------------------------------+-------------+-------------+------------------+ When there is no event log from a good run Steps: Navigate to Spark UI using spark history URL Click on Stages and sort the stages(click on Duration ) in descending order to find the longest running stage. Now let's figure out if the slow stage is a Map or Reduce/Shuffle Once you identify the slow stage, check the fields \"Input\", \"Output\", \"Shuffle Read\", \"Shuffle Write\" of the slow stage and use below grid to identify the stage type and the corresponding ETL action. 
----------------------------------------------------------------------------------- | Input | Output | Shuffle Read | Shuffle Write | MR Stage | ETL Action | |------------------------------------------------------------|----------------------| | X | | | X | Map | Read | |------------------------------------------------------------|----------------------| | X | X | | | Map | Read/Write | |------------------------------------------------------------|----------------------| | X | | | | Map | Sort Estimate | |------------------------------------------------------------|----------------------| | | | X | | Map | Sort Estimate | |------------------------------------------------------------|----------------------| | | | X | X | Reduce | Join/Agg/Repartition | |------------------------------------------------------------|----------------------| | | X | X | | Reduce | Write | ------------------------------------------------------------|---------------------- go to Map if the slow stage is from a Map operation. go to Reduce if the slow stage is from a Reduce/Shuffle operation.","title":"Identify the slow stage"},{"location":"details/slow-stage/#identify-the-slow-stage","text":"","title":"Identify the slow stage"},{"location":"details/slow-stage/#when-you-have-an-event-log-from-an-earlier-good-run","text":"You can compare the slow and the fast runs. 
For this you can even use your local pyspark and calculate a ratio between slow and fast run for each stage metrics: # Helper methods (just copy-paste it) def createEventView(eventLogFile, eventViewName): sql(\"CREATE OR REPLACE TEMPORARY VIEW {} USING org.apache.spark.sql.json OPTIONS (path '{}')\".format(eventViewName, eventLogFile)) def createStageMetricsView(eventViewName, stageMetricsViewName): sql(\"CREATE OR REPLACE TEMPORARY VIEW {} AS select `Submission Time`, `Completion Time`, `Stage ID`, t3.col.* from (select `Stage Info`.* from {} where Event='SparkListenerStageCompleted') lateral view explode(Accumulables) t3\".format(stageMetricsViewName, eventViewName)) def showDiffInStage(fastStagesTable, slowStagesTable, stageID): sql(\"select {fastStages}.Name, {fastStages}.Value as Fast, {slowStages}.Value as Slow, {slowStages}.Value / {fastStages}.Value as `Slow / Fast` from {fastStages} INNER JOIN {slowStages} ON {fastStages}.ID = {slowStages}.ID where {fastStages}.`Stage ID` = {stageID} and {slowStages}.`Stage ID` = {stageID}\".format(fastStages=fastStagesTable, slowStages=slowStagesTable, stageID=stageID)).show(40, False) # Creating the views from the event logs (just an example, you have to specify your own paths) createEventView(\"\", \"FAST_EVENTS\") createStageMetricsView(\"FAST_EVENTS\", \"FAST_STAGE_METRICS\") createEventView(\"\", \"SLOW_EVENTS\") createStageMetricsView(\"SLOW_EVENTS\", \"SLOW_STAGE_METRICS\") >>> sql(\"SELECT DISTINCT `Stage ID` from FAST_STAGE_METRICS\").show() +--------+ |Stage ID| +--------+ | 0| | 1| | 2| +--------+ >>> sql(\"SELECT DISTINCT `Stage ID` from SLOW_STAGE_METRICS\").show() +--------+ |Stage ID| +--------+ | 0| | 1| | 2| +--------+ >>> showDiffInStage(\"FAST_STAGE_METRICS\", \"SLOW_STAGE_METRICS\", 2) +-------------------------------------------+-------------+-------------+------------------+ |Name |Fast |Slow |Slow / Fast | +-------------------------------------------+-------------+-------------+------------------+ 
|scan time total (min, med, max) |1095931 |1628308 |1.485776020570638 | |internal.metrics.executorRunTime |7486648 |12990126 |1.735105750931525 | |duration total (min, med, max) |7017645 |12322243 |1.7558943206731032| |internal.metrics.jvmGCTime |220325 |1084412 |4.921874503574266 | |internal.metrics.output.bytesWritten |34767744411 |34767744411 |1.0 | |internal.metrics.input.recordsRead |149652381 |149652381 |1.0 | |internal.metrics.executorDeserializeCpuTime|5666230304 |7760682789 |1.3696377260771504| |internal.metrics.resultSize |625598 |626415 |1.0013059504665935| |internal.metrics.executorCpuTime |6403420405851|8762799691603|1.3684560963069305| |internal.metrics.input.bytesRead |69488204276 |69488204276 |1.0 | |number of output rows |149652381 |149652381 |1.0 | |internal.metrics.resultSerializationTime |36 |72 |2.0 | |internal.metrics.output.recordsWritten |149652381 |149652381 |1.0 | |internal.metrics.executorDeserializeTime |6024 |11954 |1.9843957503320053| +-------------------------------------------+-------------+-------------+------------------+","title":"When you have an event log from an earlier \"good run\""},{"location":"details/slow-stage/#when-there-is-no-event-log-from-a-good-run","text":"Steps: Navigate to Spark UI using spark history URL Click on Stages and sort the stages(click on Duration ) in descending order to find the longest running stage.","title":"When there is no event log from a good run"},{"location":"details/slow-stage/#now-lets-figure-out-if-the-slow-stage-is-a-map-or-reduceshuffle","text":"Once you identify the slow stage, check the fields \"Input\", \"Output\", \"Shuffle Read\", \"Shuffle Write\" of the slow stage and use below grid to identify the stage type and the corresponding ETL action. 
----------------------------------------------------------------------------------- | Input | Output | Shuffle Read | Shuffle Write | MR Stage | ETL Action | |------------------------------------------------------------|----------------------| | X | | | X | Map | Read | |------------------------------------------------------------|----------------------| | X | X | | | Map | Read/Write | |------------------------------------------------------------|----------------------| | X | | | | Map | Sort Estimate | |------------------------------------------------------------|----------------------| | | | X | | Map | Sort Estimate | |------------------------------------------------------------|----------------------| | | | X | X | Reduce | Join/Agg/Repartition | |------------------------------------------------------------|----------------------| | | X | X | | Reduce | Write | ------------------------------------------------------------|---------------------- go to Map if the slow stage is from a Map operation. go to Reduce if the slow stage is from a Reduce/Shuffle operation.","title":"Now let's figure out if the slow stage is a Map or Reduce/Shuffle"},{"location":"details/slow-writes-s3/","text":"Slow writes on S3 Using the default file output committer with S3a results in double data writes (sad times!). Use a newer cloud committer such as the \"S3 magic committer\" or a committer specialized for your hadoop cluster. Alternatively, write to Apache Iceberg , Delta.io , or Apache Hudi . Reference links S3 Magic Committer blog and Hadoop documentation EMRFS S3-optimized Committer","title":"Slow writes on S3"},{"location":"details/slow-writes-s3/#slow-writes-on-s3","text":"Using the default file output committer with S3a results in double data writes (sad times!). Use a newer cloud committer such as the \"S3 magic committer\" or a committer specialized for your hadoop cluster. 
Alternatively, write to Apache Iceberg , Delta.io , or Apache Hudi .","title":"Slow writes on S3"},{"location":"details/slow-writes-s3/#reference-links","text":"S3 Magic Committer blog and Hadoop documentation EMRFS S3-optimized Committer","title":"Reference links"},{"location":"details/slow-writes-too-many-files/","text":"Slow writes due to Too many small files Sometimes a partitioning approach works fine for a small dataset, but can cause a surprisingly large number of partitions for a slightly larger dataset. Check out The Small File Problem in context of HDFS. Relevant links HDFS: The Small File Problem: Partition strategies to avoid IO limitations","title":"Slow writes due to Too many small files"},{"location":"details/slow-writes-too-many-files/#slow-writes-due-to-too-many-small-files","text":"Sometimes a partitioning approach works fine for a small dataset, but can cause a surprisingly large number of partitions for a slightly larger dataset. Check out The Small File Problem in context of HDFS.","title":"Slow writes due to Too many small files"},{"location":"details/slow-writes-too-many-files/#relevant-links","text":"HDFS: The Small File Problem: Partition strategies to avoid IO limitations","title":"Relevant links"},{"location":"details/slow-writes/","text":"Slow Writes The Shuffle Write time is visible as follows: Spark UI --> Stages Tab --> Stages Detail --> Event timeline. Symptom: my spark job is spending more time writing files to disk on shuffle writes. Some potential causes: the job is writing too many files the job is writing skewed files the file output committer is not suited for this many writes","title":"Slow Writes"},{"location":"details/slow-writes/#slow-writes","text":"The Shuffle Write time is visible as follows: Spark UI --> Stages Tab --> Stages Detail --> Event timeline. Symptom: my spark job is spending more time writing files to disk on shuffle writes. 
Some potential causes: the job is writing too many files the job is writing skewed files the file output committer is not suited for this many writes","title":"Slow Writes"},{"location":"details/toobigdag/","text":"Too Big DAG (or when iterative algorithms go bump in the night) Spark uses lazy evaluation and creates a DAG (directed acyclic graph) of the operations needed to compute a piece of data. Even if the data is persisted or cached, Spark will keep this DAG in memory on the driver so that if an executor fails it can re-create this data later. This is more likely to cause problems with iterative algorithms that create RDDs or DataFrames on each iteration based on the previous iteration, like ALS. Some signs of a DAG getting too big are: Iterative algorithm becoming slower on each iteration Driver OOM Executor out-of-disk-error If your job hasn't crashed, an easy way to check is by looking at the Spark Web UI and seeing what the DAG visualization looks like. If the DAG takes a measurable length of time to load (minutes), or fills a few screens it's likely \"too-big.\" Just because a DAG \"looks\" small though doesn't mean that it isn't necessarily an issue, medium-sized-looking DAGs with lots of shuffle files can cause executor out of disk issues too. Working around this can be complicated, but there are some tools to simplify it. The first is Spark's checkpointing which allows Spark to \"forget\" the DAG so far by writing the data out to a persistent storage like S3 or HDFS. The second is manually doing what checkpointing does, that is on your own writing the data out and loading it back in. Unfortunately, if you work in a notebook environment this might not be enough to solve your problem. 
To work around this it's important to explicitly clean up your old RDDs/DataFrames by setting their references to None/null. If you still run into executor out of disk space errors, you may need to look at the approach taken in Spark's ALS algorithm of triggering eager shuffle cleanups, but this is an advanced feature and can lead to non-recoverable errors.","title":"Too Big DAG (or when iterative algorithms go bump in the night)"},{"location":"details/toobigdag/#too-big-dag-or-when-iterative-algorithms-go-bump-in-the-night","text":"Spark uses lazy evaluation and creates a DAG (directed acyclic graph) of the operations needed to compute a piece of data. Even if the data is persisted or cached, Spark will keep this DAG in memory on the driver so that if an executor fails it can re-create this data later. This is more likely to cause problems with iterative algorithms that create RDDs or DataFrames on each iteration based on the previous iteration, like ALS. Some signs of a DAG getting too big are: Iterative algorithm becoming slower on each iteration Driver OOM Executor out-of-disk-error If your job hasn't crashed, an easy way to check is by looking at the Spark Web UI and seeing what the DAG visualization looks like. If the DAG takes a measurable length of time to load (minutes), or fills a few screens it's likely \"too-big.\" Just because a DAG \"looks\" small though doesn't mean that it isn't necessarily an issue, medium-sized-looking DAGs with lots of shuffle files can cause executor out of disk issues too. Working around this can be complicated, but there are some tools to simplify it. The first is Spark's checkpointing which allows Spark to \"forget\" the DAG so far by writing the data out to a persistent storage like S3 or HDFS. The second is manually doing what checkpointing does, that is on your own writing the data out and loading it back in. Unfortunately, if you work in a notebook environment this might not be enough to solve your problem. 
While this will introduce a \"cut\" in the DAG, if the old RDDs or DataFrames/Datasets are still in scope they will still continue to reside in memory on the driver, and any shuffle files will continue to reside on the disks of the workers. To work around this it's important to explicitly clean up your old RDDs/DataFrames by setting their references to None/null. If you still run into executor out of disk space errors, you may need to look at the approach taken in Spark's ALS algorithm of triggering eager shuffle cleanups, but this is an advanced feature and can lead to non-recoverable errors.","title":"Too Big DAG (or when iterative algorithms go bump in the night)"},{"location":"details/toofew_tasks/","text":"Too few tasks","title":"Toofew tasks"},{"location":"details/toofew_tasks/#too-few-tasks","text":"","title":"Too few tasks"},{"location":"details/toomany_tasks/","text":"Too many tasks","title":"Toomany tasks"},{"location":"details/toomany_tasks/#too-many-tasks","text":"","title":"Too many tasks"},{"location":"details/udfslow/","text":"Avoid UDFs for the most part User defined functions in Spark are black boxes to Spark and can limit performance. When possible look for built-in alternatives. One important exception is that if you have multiple functions which must be done in Python, the advice changes a little bit. Since moving data from the JVM to Python is expensive, if you can chain together multiple Python UDFs on the same column, Spark is able to pipeline these together into a single copy to/from Python.","title":"Avoid UDFs for the most part"},{"location":"details/udfslow/#avoid-udfs-for-the-most-part","text":"User defined functions in Spark are black boxes to Spark and can limit performance. When possible look for built-in alternatives. One important exception is that if you have multiple functions which must be done in Python, the advice changes a little bit. 
Since moving data from the JVM to Python is expensive, if you can chain together multiple Python UDFs on the same column, Spark is able to pipeline these together into a single copy to/from Python.","title":"Avoid UDFs for the most part"},{"location":"details/write-fails/","text":"Write Fails Write failures can sometimes mask other problems. A good first step is to insert a cache or persist right before the write step. Iceberg table writes can sometimes fail after upgrading to a new version as the partitioning of the table bubbles further up. Range based partitioning (used by default with sorted tables) can result in a small number of partitions when there is not much key distance. One option is to, as with a manual sort in Spark, add some extra higher cardinality columns to your sort order in your iceberg table. You can go back to pre-Spark 3 behaviour by instead insert your own manual sort and set write mode to none .","title":"Write Fails"},{"location":"details/write-fails/#write-fails","text":"Write failures can sometimes mask other problems. A good first step is to insert a cache or persist right before the write step. Iceberg table writes can sometimes fail after upgrading to a new version as the partitioning of the table bubbles further up. Range based partitioning (used by default with sorted tables) can result in a small number of partitions when there is not much key distance. One option is to, as with a manual sort in Spark, add some extra higher cardinality columns to your sort order in your iceberg table. You can go back to pre-Spark 3 behaviour by instead insert your own manual sort and set write mode to none .","title":"Write Fails"},{"location":"flowchart/","text":"Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load. 
graph TD A[Start here] --> B[Slow Running Job] C[I have an exception or error] A --> C click B \"slow\" \"Slow\" click C \"error\" \"Error\"","title":"Index"},{"location":"flowchart/error/","text":"Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load. flowchart LR Error[Error/Exception] Error --> MemoryError[Memory Error] Error --> ShuffleError[Shuffle Error] Error --> SqlAnalysisError[sql.AnalysisException] Error --> WriteFails[WriteFails] Error --> OtherError[Others] Error --> Serialization Serialization --> KyroBuffer[Kyro Buffer Overflow] KyroBuffer --> DriverMaxResultSize MemoryError --> DriverMemory[Driver] MemoryError --> ExecutorMemory[Executor] DriverMemory --> DriverMemoryError[Spark driver ran out of memory] DriverMemory --> DriverMaxResultSize[MaxResultSize exceeded] DriverMemory --> TooBigBroadcastJoin[Too Big Broadcast Join] DriverMemory --> ContainerOOM[Container Out Of Memory] DriverMaxResultSize --> TooBigBroadcastJoin ExecutorMemory --> ExecutorMemoryError[Spark executor ran out of memory] ExecutorMemory --> ExecutorDiskError[Executor out of disk error] ExecutorMemory --> ContainerOOM ExecutorMemory --> LARGERECORDS[Too large record / column+record] click Error \"../../details/error-job\" click MemoryError \"../../details/error-memory\" click DriverMemory \"../../details/error-memory/#driver\" click DriverMemoryError \"../../details/error-driver-out-of-memory\" click DriverMaxResultSize \"../../details/error-driver-max-result-size\" click ExecutorMemory \"../../details/error-memory/#executor\" click ExecutorMemoryError \"../../details/error-executor-out-of-memory\" click ExecutorDiskError \"../../details/error-executor-out-of-disk\" click ShuffleError \"../../details/error-shuffle\" click SqlAnalysisError \"../../details/error-sql-analysis\" click OtherError \"../../details/error-other\" click ContainerOOM \"../../details/container-oom\" click TooBigBroadcastJoin \"../../details/big-broadcast-join\" \"Broadcast Joins\" 
click LARGERECORDS \"../../details/failure-executor-large-record\" click WriteFails \"../../details/write-fails\"","title":"Error"},{"location":"flowchart/shared/","text":"Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load. graph TD OHNOES[Contact support]","title":"Shared"},{"location":"flowchart/slow/","text":"Spark Error Flowchart: Note this uses mermaid.js which may take awhile to load. flowchart LR SlowJob[Slow Job] SlowJob --> SlowStage[Slow Stage] SlowStage --> SlowMap[Slow Read/Map] SlowStage --> SlowReduce[Slow Shuffle/Reducer/Exchange] SlowStage --> SLOWWRITESTOSTORAGE[Slow writes to storage] SlowJob --> TOOBIGDAG[Too Big DAG] SlowJob --> SlowCluster[Slow Cluster] SlowReduce --> PAGGS[Partial aggregates] SlowReduce --> TooFewShuffleTasks[Not Enough Shuffle Tasks] SlowReduce --> TooManyShuffleTasks[Too many shuffle tasks] SlowReduce --> SkewedShuffleTasks[Skewed Shuffle Tasks] SlowReduce --> SpillToDisk[Spill To Disk] SkewedShuffleTasks --> SkewedJoin[Skewed Join] SkewedShuffleTasks --> SkewedAggregation[Aggregation/Group By] click SlowJob \"../../details/slow-job\" click SlowStage \"../../details/slow-stage\" click SlowMap \"../../details/slow-map\" click SlowReduce \"../../details/slow-reduce\" click SlowCluster \"../../details/slow-job-slow-cluster\" click TOOBIGDAG \"../../details/toobigdag\" click TooFewShuffleTasks \"../../details/slow-reduce/#not-enough-shuffle-tasks\" click TooManyShuffleTasks \"../../details/slow-reduce/#too-many-shuffle-tasks\" click SkewedShuffleTasks \"../../details/slow-reduce/#skewed-shuffle-tasks\" click SpillToDisk \"../../details/slow-reduce/#spill-to-disk\" click SkewedJoin \"../../details/slow-skewed-join\" click SkewedAggregation \"../../details/slow-reduce/#skewed-shuffle-tasks\" SLOWWRITESTOSTORAGE[Slow writes to storage] SLOWWRITESTOSTORAGE --> TOOMANYFILES[Slow writes because there are too many files] SLOWWRITESTOSTORAGE --> SkewedWrite[Skewed Write: when to use Sort/Repartition/Coalesce 
before write] SLOWWRITESTOSTORAGE --> S3COMMITTER[Slow writes on S3 depend on the committer] click UDFSLOWNESS \"../../details/udfslow\" click PAGGS \"../../details/partial_aggregates\" click FILTERNOTPUSHED \"../../details/slow-partition_filter_pushdown\" click SLOWSTAGE \"../../details/slow-stage\" click SLOWWRITESTOSTORAGE \"../../details/slow-writes\" click SkewedWrite \"../../details/slow-skewed-write\" click TOOMANYFILES \"../../details/slow-writes-too-many-files\" click S3COMMITTER \"../../details/slow-writes-s3\" click TOOMANY \"../../details/toomany_tasks\" click TOOFEW \"../../details/toofew_tasks\" click NOTENOUGHEXEC \"../../details/notenoughexecs\" click SHUFFLEPARTITIONISSUES \"../../details/slow-reduce\" click READPARTITIONISSUES \"../../details/read-partition-issue\"","title":"Slow"}]} \ No newline at end of file diff --git a/search/worker.js b/search/worker.js new file mode 100644 index 0000000..8628dbc --- /dev/null +++ b/search/worker.js @@ -0,0 +1,133 @@ +var base_path = 'function' === typeof importScripts ? '.' 
: '/search/'; +var allowSearch = false; +var index; +var documents = {}; +var lang = ['en']; +var data; + +function getScript(script, callback) { + console.log('Loading script: ' + script); + $.getScript(base_path + script).done(function () { + callback(); + }).fail(function (jqxhr, settings, exception) { + console.log('Error: ' + exception); + }); +} + +function getScriptsInOrder(scripts, callback) { + if (scripts.length === 0) { + callback(); + return; + } + getScript(scripts[0], function() { + getScriptsInOrder(scripts.slice(1), callback); + }); +} + +function loadScripts(urls, callback) { + if( 'function' === typeof importScripts ) { + importScripts.apply(null, urls); + callback(); + } else { + getScriptsInOrder(urls, callback); + } +} + +function onJSONLoaded () { + data = JSON.parse(this.responseText); + var scriptsToLoad = ['lunr.js']; + if (data.config && data.config.lang && data.config.lang.length) { + lang = data.config.lang; + } + if (lang.length > 1 || lang[0] !== "en") { + scriptsToLoad.push('lunr.stemmer.support.js'); + if (lang.length > 1) { + scriptsToLoad.push('lunr.multi.js'); + } + if (lang.includes("ja") || lang.includes("jp")) { + scriptsToLoad.push('tinyseg.js'); + } + for (var i=0; i < lang.length; i++) { + if (lang[i] != 'en') { + scriptsToLoad.push(['lunr', lang[i], 'js'].join('.')); + } + } + } + loadScripts(scriptsToLoad, onScriptsLoaded); +} + +function onScriptsLoaded () { + console.log('All search scripts loaded, building Lunr index...'); + if (data.config && data.config.separator && data.config.separator.length) { + lunr.tokenizer.separator = new RegExp(data.config.separator); + } + + if (data.index) { + index = lunr.Index.load(data.index); + data.docs.forEach(function (doc) { + documents[doc.location] = doc; + }); + console.log('Lunr pre-built index loaded, search ready'); + } else { + index = lunr(function () { + if (lang.length === 1 && lang[0] !== "en" && lunr[lang[0]]) { + this.use(lunr[lang[0]]); + } else if (lang.length > 1) { 
+ this.use(lunr.multiLanguage.apply(null, lang)); // spread operator not supported in all browsers: https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Spread_operator#Browser_compatibility + } + this.field('title'); + this.field('text'); + this.ref('location'); + + for (var i=0; i < data.docs.length; i++) { + var doc = data.docs[i]; + this.add(doc); + documents[doc.location] = doc; + } + }); + console.log('Lunr index built, search ready'); + } + allowSearch = true; + postMessage({config: data.config}); + postMessage({allowSearch: allowSearch}); +} + +function init () { + var oReq = new XMLHttpRequest(); + oReq.addEventListener("load", onJSONLoaded); + var index_path = base_path + '/search_index.json'; + if( 'function' === typeof importScripts ){ + index_path = 'search_index.json'; + } + oReq.open("GET", index_path); + oReq.send(); +} + +function search (query) { + if (!allowSearch) { + console.error('Assets for search still loading'); + return; + } + + var resultDocuments = []; + var results = index.search(query); + for (var i=0; i < results.length; i++){ + var result = results[i]; + doc = documents[result.ref]; + doc.summary = doc.text.substring(0, 200); + resultDocuments.push(doc); + } + return resultDocuments; +} + +if( 'function' === typeof importScripts ) { + onmessage = function (e) { + if (e.data.init) { + init(); + } else if (e.data.query) { + postMessage({ results: search(e.data.query) }); + } else { + console.error("Worker - Unrecognized message: " + e); + } + }; +} diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 0000000..0f8724e --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,3 @@ + + + \ No newline at end of file diff --git a/sitemap.xml.gz b/sitemap.xml.gz new file mode 100644 index 0000000..cfbe132 Binary files /dev/null and b/sitemap.xml.gz differ